Example #1
def stylize(args):
    device = torch.device("cuda" if args.is_cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(args.model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)

        output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
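
The utils helpers that most of these examples lean on are not shown. In the upstream fast-neural-style code, load_image and save_image look roughly like this (a sketch, not the exact source; Image.LANCZOS substituted for the deprecated Image.ANTIALIAS):

from PIL import Image


def load_image(filename, size=None, scale=None):
    img = Image.open(filename)
    if size is not None:
        img = img.resize((size, size), Image.LANCZOS)
    elif scale is not None:
        img = img.resize((int(img.size[0] / scale), int(img.size[1] / scale)),
                         Image.LANCZOS)
    return img


def save_image(filename, data):
    # data is a CHW float tensor in the 0-255 range
    img = data.clone().clamp(0, 255).numpy()
    img = img.transpose(1, 2, 0).astype("uint8")
    img = Image.fromarray(img)
    img.save(filename)
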
Example #2
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image,
                                     scale=args.content_scale)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(
                    ".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image,
                                            args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
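
stylize_onnx_caffe2 is referenced here but not shown. The upstream example implements it along these lines (a sketch assuming the onnx and caffe2 packages are available; the .cpu() call is added so the tensor can be handed to numpy):

import onnx
import torch


def stylize_onnx_caffe2(content_image, args):
    # run the exported ONNX graph through the Caffe2 backend
    import caffe2.python.onnx.backend as backend
    model = onnx.load(args.model)
    prepared_backend = backend.prepare(model, device='CUDA:0' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.cpu().numpy()}
    c2_out = prepared_backend.run(inp)[0]
    return torch.from_numpy(c2_out)
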
Example #3
def stylize(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')

    # image preprocessing
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose(
        [tv.transforms.ToTensor(),
         tv.transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device).detach()

    # model
    style_model = TransformerNet().eval()
    style_model.load_state_dict(
        t.load(opt.model_path, map_location=lambda _s, _: _s))
    style_model.to(device)

    # style transfer and save
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image((output_data / 255).clamp(min=0, max=1),
                        opt.result_path)
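
A hypothetical invocation of this kwargs-driven variant, with placeholder paths and Config supplying defaults for anything not overridden:

stylize(content_path='content.jpg',
        model_path='checkpoints/transformer.pth',
        result_path='output.png',
        use_gpu=True)
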
Example #4
def stylize(content_image_path, pathout, model):
    r"""
    :param content_image_path: path of the content image
    :param pathout: path where the stylized image is saved
    :param model: path of the model (default: ./saved_models/starry-night.model)
    :return: None; the stylized image is written to pathout
    """
    device = torch.device("cpu")

    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = Image.open(content_image_path)
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
        img = output[0].clone().clamp(0, 255).numpy()
        img = img.transpose(1, 2, 0).astype("uint8")
        img = Image.fromarray(img)
        img.save(pathout)
Example #5
def generate_stylized_image(args):
    """
    Creates a stylized image based on the arguments passed
    :param args: (content_image, style_image, model, etc.)
    :return: None
    """
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet(style_number=args.style_num)
        state_dict = torch.load(args.model)
        style_model.load_state_dict(state_dict, strict=False)
        style_model.to(device)
        output = style_model(content_image, style_id=[args.style_id]).cpu()

    utils.save_image(
        'output/' + args.output_image + '_style' + str(args.style_id) + '.jpg',
        output[0])
Example #6
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image,
                                     scale=args.content_scale)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)
    style_id = torch.LongTensor([args.style_id]).to(device)

    with torch.no_grad():
        import time
        start = time.time()
        style_model = TransformerNet(style_num=args.style_num)
        state_dict = torch.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model([content_image, style_id]).cpu()
        end = time.time()
        print('Time={}'.format(end - start))
    if args.export_onnx:
        assert args.export_onnx.endswith(
            ".onnx"), "Export model file should end with .onnx"
        output = torch.onnx._export(style_model, [content_image, style_id],
                                    args.export_onnx,
                                    input_names=['input_image', 'style_index'],
                                    output_names=['output_image']).cpu()

    utils.save_image(
        'output/' + args.output_image + '_style' + str(args.style_id) + '.jpg',
        output[0])
Example #7
def stylize(**kwargs):
    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')

    # image preprocessing
    content_image = tv.datasets.folder.default_loader(opt.content_path)
    content_transform = tv.transforms.Compose([
        tv.transforms.ToTensor(),
        tv.transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device).detach()

    # model
    style_model = TransformerNet().eval()
    style_model.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
    style_model.to(device)

    # style transfer and save
    output = style_model(content_image)
    output_data = output.cpu().data[0]
    tv.utils.save_image((output_data / 255).clamp(min=0, max=1), opt.result_path)
Example #8
def stylize_one(style_model_path, target_image):
    content_image = utils.load_image(target_image)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(style_model_path)

        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]

        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image)
        data = output[0].clamp(0, 255)
        # torchvision.utils.save_image(data, './1.png', normalize=True)
        img = data.cpu().clone().clamp(0, 255).numpy()
        img = img.transpose(1, 2, 0).astype("uint8")
        img = Image.fromarray(img)
    return img, data
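
A hypothetical call to stylize_one; the paths are placeholders and device is assumed to be defined at module scope, as in the other examples:

img, data = stylize_one('saved_models/mosaic.pth', 'content.jpg')
img.save('stylized.jpg')
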
Example #9
def stylize():
    device = torch.device("cpu")
    input_img = request.args.get('input_img')
    model_get = request.args.get('style')
    print(model_get)

    content_image = utils.load_image(input_img)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)
    model_get = str(model_get)
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(model_get)
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
        a = random.randint(1, 101)
        img_path = str("static/images/output_{}.jpg".format(a))
    utils.save_image(img_path, output[0])
    image_k = str("output_{}.jpg".format(a))

    get_image = os.path.join(app.config['UPLOAD_FOLDER'], image_k)

    print("Done")

    return render_template("index.html", get_image=get_image)
Example #10
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
Example #11
def stylize(args):
    device = torch.device("cpu")

    content_image = utils.load_image(args['content_image'], scale=None)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args['model'].endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args['model'])
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            output = style_model(content_image).cpu()
    utils.save_image(args["output_image"], output[0])
Example #12
    def convert(self, orig_image):
        #pil_img = Image.fromarray(orig_image)
        pil_img = orig_image

        content_transform = transforms.Compose(
            [transforms.ToTensor(),
             transforms.Lambda(lambda x: x.mul(255))])
        content_image = content_transform(pil_img)
        content_image = content_image.unsqueeze(0).to(self.device)

        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(self.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(self.device)

            rospy.loginfo('stylizing image ...')
            output = style_model(content_image).cpu()
            img = output[0].clone().clamp(0, 255).numpy()
            img = img.transpose(1, 2, 0).astype("uint8")

            img = cv2.addWeighted(
                orig_image, self.alpha, img[0:orig_image.shape[0],
                                            0:orig_image.shape[1]],
                1 - self.alpha, 0.0)

            img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
            return img
Example #13
def vectorize(args):
    size = args.size
    # vectors = np.zeros((size, size, 2), dtype=np.float32)
    # for y in range(size):
    #     for x in range(size):
    #         xx = float(x - size / 2)
    #         yy = float(y - size / 2)
    #         rsq = xx ** 2 + yy ** 2
    #         if (rsq == 0):
    #             vectors[y, x, 0] = 1
    #             vectors[y, x, 1] = 1
    #         else:
    #             vectors[y, x, 0] = -yy / rsq
    #             vectors[y, x, 1] = xx / rsq
    # vectors = NormalizVectrs(vectors)

    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = Image.open(args.content_image).convert('L')
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0)
    content_image = utils.subtract_imagenet_mean_batch(content_image)
    content_image = content_image.to(device)

    with torch.no_grad():
        vectorize_model = TransformerNet()
        state_dict = torch.load(args.saved_model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        vectorize_model.load_state_dict(state_dict)
        vectorize_model.to(device)
        output = vectorize_model(content_image)

    target = dataset.hdf5_loader(args.target_vector)
    target_transform = transforms.ToTensor()
    target = target_transform(target)
    target = target.unsqueeze(0).to(device)

    cosine_loss = torch.nn.CosineEmbeddingLoss()
    label = torch.ones(1, 1, args.size, args.size).to(device)
    loss = cosine_loss(output, target, label)
    print(loss.item())

    output = output.cpu().clone().numpy()[0].transpose(1, 2, 0)
    output = NormalizVectrs(output)
    lic(output, "output.jpg")

    target = target.cpu().clone().numpy()[0].transpose(1, 2, 0)
    lic(target, "target.jpg")
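
NormalizVectrs and lic belong to the surrounding line-integral-convolution code and are not shown. Assuming NormalizVectrs rescales each 2D vector to unit length, a plausible sketch is:

import numpy as np


def NormalizVectrs(vectors):
    # normalize each entry of an (H, W, 2) vector field to unit length,
    # leaving zero vectors untouched (hypothetical helper)
    norm = np.sqrt((vectors ** 2).sum(axis=-1, keepdims=True))
    norm[norm == 0] = 1.0
    return vectors / norm
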
Example #14
def stylize(args):
    content_image = load_image_eval(args.content_image)
    with flow.no_grad():
        style_model = TransformerNet()
        state_dict = flow.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to("cuda")
        output = style_model(
            flow.Tensor(content_image).clamp(0, 255).to("cuda"))
    print(args.output_image)
    cv2.imwrite(args.output_image, recover_image(output.numpy()))
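
load_image_eval and recover_image are OneFlow-side helpers that are not shown. Given how the output is passed to cv2.imwrite, recover_image presumably undoes the 0-255 CHW batching, roughly:

import numpy as np


def recover_image(arr):
    # hypothetical helper: (1, 3, H, W) float array in 0-255 -> HWC uint8 BGR
    img = arr.squeeze(0).clip(0, 255).transpose(1, 2, 0).astype("uint8")
    return img[:, :, ::-1]  # RGB -> BGR for cv2.imwrite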
Example #15
def multi_style(path, width=320, device=device):
    model_iter = itertools.cycle(os.listdir(path))
    model_file = next(model_iter)
    print(f'Using {model_file} ')
    model_path = os.path.join(path, model_file)
    model = TransformerNet()
    model.load_state_dict(read_state_dict(model_path))
    model.to(device)

    monitor_width = get_monitors()[0].width
    monitor_height = get_monitors()[0].height

    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    timer = Timer()
    last_update = int(time.time())
    while True:
        frame = vs.read()
        if frame is None:
            frame = np.random.randint(0,
                                      255, (int(width / 1.5), width, 3),
                                      dtype=np.uint8)
        frame = cv2.flip(frame, 1)
        frame = resize(frame, width=width)

        # Style the frame
        img = style_frame(frame, model, device).numpy()
        img = np.clip(img, 0, 255)
        img = img.astype(np.uint8)

        img = img.transpose(1, 2, 0)
        img = cv2.resize(img[:, :, ::-1], (monitor_width, monitor_height))

        # print(img.shape)
        cv2.imshow("Output", img)
        timer()

        # Determine if the n key was pressed or if the time since the last
        # rotation exceeds the defined rotation constraint
        key = cv2.waitKey(1) & 0xFF
        time_since_last_update = int(time.time()) - last_update

        rotate_by_key = key == ord("n")
        rotate_by_time = (time_since_last_update >= rotate_time)

        if rotate_by_time or rotate_by_key:
            model_file = next(model_iter)
            print(f'Using {model_file} ')
            model_path = os.path.join(path, model_file)
            model.load_state_dict(read_state_dict(model_path))
            model.to(device)
            last_update = int(time.time())
        elif key == ord("q"):
            break
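
style_frame is not shown. Given that its result is clipped, cast to uint8, and transposed from CHW above, a minimal sketch (hypothetical, assuming a BGR uint8 frame and a model trained on 0-255 inputs) is:

import torch


def style_frame(frame, model, device='cpu'):
    # BGR uint8 HWC frame -> RGB float CHW batch in the 0-255 range
    rgb = frame[:, :, ::-1].copy()
    t = torch.from_numpy(rgb).permute(2, 0, 1).float().unsqueeze(0).to(device)
    with torch.no_grad():
        out = model(t)[0]
    return out.cpu()
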
Example #16
def load_model(model_path):
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(model_path)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        style_model.eval()
        return style_model
Example #17
def get_output(trained_model, content_image):
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(trained_model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
        # utils.save_image(args.output_image, output[0])
    return output
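
A hypothetical use of get_output, preparing the content tensor the same way the stylize functions above do; the paths are placeholders:

import torch
from PIL import Image
from torchvision import transforms

device = torch.device("cpu")
content_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Lambda(lambda x: x.mul(255))])
content_image = content_transform(Image.open('content.jpg')).unsqueeze(0).to(device)
output = get_output('saved_models/candy.pth', content_image)
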
Example #18
def load_model(model_path):
    print('loading model')
    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(model_path)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        style_model.eval()
        return style_model
Example #19
def multi_style(path,
                width=320,
                device=device,
                cycle_length=np.inf,
                half_precision=False,
                rotate=0):
    model_iter = itertools.cycle(os.listdir(path))
    model_file = next(model_iter)
    print(f'Using {model_file} ')
    model_path = os.path.join(path, model_file)
    model = TransformerNet()
    model.load_state_dict(read_state_dict(model_path))
    model.to(device)
    if half_precision:
        model.half()
    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    timer = Timer()
    cycle_begin = time.time()
    while True:
        frame = vs.read()
        if frame is None:
            frame = np.random.randint(0,
                                      255, (int(width / 1.5), width, 3),
                                      dtype=np.uint8)
        frame = cv2.flip(frame, 1)
        frame = resize(frame, width=width)

        # Style the frame
        img = style_frame(frame, model, device, half_precision)
        img = cv2.resize(img[:, :, ::-1], (640, 480))

        # rotate
        if rotate > 0:
            img = cv2.rotate(img, cv2.ROTATE_90_CLOCKWISE)
        elif rotate < 0:
            img = cv2.rotate(img, cv2.ROTATE_90_COUNTERCLOCKWISE)
        # print(img.shape)
        cv2.imshow("Output", img)
        timer()
        key = cv2.waitKey(1) & 0xFF
        if key == ord("n") or (time.time() - cycle_begin) > cycle_length:
            model_file = next(model_iter)
            print(f'Using {model_file} ')
            model_path = os.path.join(path, model_file)
            model.load_state_dict(read_state_dict(model_path))
            model.to(device)
            cycle_begin = time.time()
        elif key == ord("q"):
            break
Example #20
    def transfer_style(self, content_img_stream):
        content_image = self._process_image(content_img_stream)

        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load('rain_princess.pth')
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(self.device)
            output = style_model(content_image).cpu()
            output = numpy.array(output.squeeze(0))
            output = output.transpose(1, 2, 0).astype("uint8")
            return misc.toimage(output)
Example #21
def run_feedforward_texture_transfer(args):

    print('running feedforward neural style transfer...')

    content_image = load_image(args.content_image,
                               mask=False,
                               size=args.image_size,
                               scale=None,
                               square=False)
    content_image = preprocess(content_image)

    input_image = content_image

    in_channels = 3

    stylizing_net = TransformerNet(in_channels)
    state_dict = torch.load(args.style_model)

    for k in list(state_dict.keys()):
        if re.search(r'in\d+\.running_(mean|var)$', k):
            del state_dict[k]
    stylizing_net.load_state_dict(state_dict)
    stylizing_net = stylizing_net.to(device)

    output = stylizing_net(input_image)

    if args.original_colors == 1:
        output = original_colors(content_image.cpu(), output)
    save_image(filename=args.output_image, data=output.detach())
Example #22
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")
    content_transform = transforms.Compose([
                        transforms.ToTensor(),
                        transforms.Lambda(lambda x: x.mul(255))
                        ])


    print('Reading bag file '+ os.getcwd() + '/' + args.content_image
          + ' topic ' + args.topic)
    readbag = rosbag.Bag(args.content_image,'r')
    bridge = CvBridge()
    cv_img = []

    with rosbag.Bag(args.output_image,'w') as outbag:
        for topic, msg, dtime in readbag.read_messages():

            if topic == args.topic:
                cv_img = bridge.imgmsg_to_cv2(msg, desired_encoding="rgb8")
                (h, w, d) = cv_img.shape

                nh = h / args.content_scale
                nw = w / args.content_scale
                cv_img = cv2.resize(cv_img, (int(nw), int(nh)))
                content_image = content_transform(cv_img).unsqueeze(0).to(device)

                with torch.no_grad():
                    style_model = TransformerNet()
                    state_dict = torch.load(args.model)
                    # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
                    for k in list(state_dict.keys()):
                        if re.search(r'in\d+\.running_(mean|var)$', k):
                            del state_dict[k]
                    style_model.load_state_dict(state_dict)
                    style_model.to(device)
                    if args.export_onnx:
                        assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                        output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
                    else:
                        print('stylizing image ...')
                        output = style_model(content_image).cpu()
            else:
                outbag.write(topic, msg, msg.header.stamp if msg._has_header else dtime)

    readbag.close()
Example #23
    def transfer_style(self, content_img_stream, model_name):
        device = torch.device("cpu")

        content_image = self.process_image(content_img_stream)

        with torch.no_grad():
            style_model = TransformerNet()
            # specify the path to the folder with the saved models below
            # base_dir = './saved_models/'
            base_dir = '/Users/yanadm/Documents/Style Transfer Bot/saved_models/'
            filename = model_name
            path_to_model = os.path.join(base_dir, filename)
            state_dict = torch.load(path_to_model)
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            output = style_model(content_image).cpu()    
        return misc.toimage(output[0])
Example #24
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)


    with torch.no_grad():
        style_model = TransformerNet(style_num=args.style_num)
        state_dict = torch.load(args.model)
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image, style_id=[args.style_id]).cpu()

    utils.save_image('output/'+args.output_image+'_style'+str(args.style_id)+'.jpg', output[0])
Example #25
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image_local(args.content_image)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(args.model)
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
Example #26
class depthStyleDataset(Dataset):
    """Face Landmarks dataset."""

    def __init__(self, csv_file, transform=None, modelname = "mosaic"):
        self.frame = pd.read_csv(csv_file, header=None)
        self.transform = transform
        self.modelname = modelname
        
        self.style_model = TransformerNet()
        modelpath = "saved_models/"+self.modelname+".pth"
        state_dict = torch.load(modelpath)
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        self.style_model.load_state_dict(state_dict)
        self.style_model.to(device)        

    def __getitem__(self, idx):
        image_name = self.frame.iloc[idx, 0]
        depth_name = self.frame.iloc[idx, 1]

        #image = Image.open(image_name)
        depth = Image.open(depth_name)
        image = matplotlib.image.imread(image_name)
        #depth = matplotlib.image.imread(depth_name)
        with torch.no_grad():
            output = single_stylize(self.style_model, image)

        #output = Image.fromarray(output)
        output = Image.fromarray(np.uint8(output))
        #visualize(output, depth)
        sample = {'image': output, 'depth': depth}

        if self.transform:
            sample = self.transform(sample)

        return sample

    def __len__(self):
        return len(self.frame)
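
A hypothetical way to consume depthStyleDataset, assuming a two-column CSV of image and depth-map paths. Without a transform the samples hold PIL images, so the default DataLoader collate only works once a tensorizing transform is supplied:

dataset = depthStyleDataset('image_depth_pairs.csv', modelname='mosaic')
sample = dataset[0]  # {'image': stylized PIL image, 'depth': PIL image}
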
Example #27
def style_data():
    style = request.form.get('style')
    modelname = {
        'X': 'hiphop.pth',
        'A': 'rain_princess.pth',
        'B': 'starry-night.model',
        'C': 'style6.pth',
        'D': 'style8.pth',
        'E': 'style9.pth'
    }
    txt = []
    txt.append(session['title'])
    txt.append(session['comment_1'] + '\n' + session['comment_2'])

    pos = choosetemplate('./static/' + session["imagepath_2"],
                         './static/' + session["imagepath_1"], txt,
                         "./static/img/" + style + "results_notext.jpg")
    if style != 'Z':
        device = torch.device("cuda")
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load('../ST/saved_models/' + modelname[style])
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
        stylize("./static/img/" + style + "results_notext.jpg",
                "./static/img/" + style + "results_notext_S.png",
                model=style_model,
                device=device)
        torch.cuda.empty_cache()
        # stylize("./static/img/"+style+"results_notext.jpg", "./static/img/"+style+"results_notext_S.png", model='../ST/saved_models/'+modelname[style])
        addallimage("./static/img/" + style + "results_notext_S.png", pos,
                    "./static/img/results.jpg")
    else:
        addallimage("./static/img/" + style + "results_notext.jpg", pos,
                    "./static/img/results.jpg")
    torch.cuda.empty_cache()
    return render_template("style.html", style_img=session['style_img'])
Example #28
def multi_style(path, width=320, device=device):
    model_iter = itertools.cycle(os.listdir(path))
    model_file = next(model_iter)
    print(f'Using {model_file} ')
    model_path = os.path.join(path, model_file)
    model = TransformerNet()
    model.load_state_dict(read_state_dict(model_path))
    model.to(device)

    vs = VideoStream(src=0).start()
    time.sleep(2.0)
    timer = Timer()
    while True:
        frame = vs.read()
        if frame is None:
            frame = np.random.randint(0,
                                      255, (int(width / 1.5), width, 3),
                                      dtype=np.uint8)
        frame = cv2.flip(frame, 1)
        frame = resize(frame, width=width)

        # Style the frame
        img = style_frame(frame, model).numpy()
        img = np.clip(img, 0, 255)
        img = img.astype(np.uint8)

        img = img.transpose(1, 2, 0)
        img = cv2.resize(img[:, :, ::-1], (640, 480))

        # print(img.shape)
        cv2.imshow("Output", img)
        timer()
        key = cv2.waitKey(1) & 0xFF
        if key == ord("n"):
            model_file = next(model_iter)
            print(f'Using {model_file} ')
            model_path = os.path.join(path, model_file)
            model.load_state_dict(read_state_dict(model_path))
            model.to(device)
        elif key == ord("q"):
            break
Example #29
def stylize(img_stream, style_type):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    content_image = load_image(img_stream, scale=None)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        models_dir = './saved_models/'
        state_dict = torch.load(models_dir + '{}'.format(style_type))
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
    return misc.toimage(output[0])
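
scipy.misc.toimage, used here and in a few other examples, was removed from SciPy; a PIL-based stand-in for a CHW tensor in the 0-255 range would be:

from PIL import Image


def to_pil_image(tensor):
    # equivalent of the deprecated scipy.misc.toimage for this use case
    img = tensor.clone().clamp(0, 255).numpy().transpose(1, 2, 0).astype("uint8")
    return Image.fromarray(img)
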
Example #30
def stylize(input_image, style, cuda):
    device = torch.device("cuda" if cuda else "cpu")

    content_image = utils.load_image(input_image, scale=None)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load("saved_models/" + style + ".pth")
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()

    utils.save_image("results/" + style + '_out.jpg', output[0])
Example #31
def stylize(args):
    if torch.cuda.is_available():
        print('CUDA available, using GPU.')
        device = torch.device('cuda')
    else:
        print('GPU training unavailable... using CPU.')
        device = torch.device('cpu')

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])

    # NOTE: Remove UNSQUEEZE, move to TransformerNet for CoreML UIImage input...
    content_image = content_transform(content_image).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model,
                                            content_image,
                                            args.export_onnx,
                                            input_names=['inputImage']).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0])
Example #32
def stylize(args):  # inference helper: once the model is trained, use it to generate stylized images
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image,
                                     scale=args.content_scale)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    with torch.no_grad():
        style_model = TransformerNet()
        state_dict = torch.load(args.model)
        # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
        for k in list(state_dict.keys()):
            if re.search(r'in\d+\.running_(mean|var)$', k):
                del state_dict[k]
        style_model.load_state_dict(state_dict)
        style_model.to(device)
        output = style_model(content_image).cpu()
        utils.save_image(args.output_image, output[0])
Example #33
def train(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    transform = transforms.Compose([
        transforms.Resize(args.image_size),
        transforms.CenterCrop(args.image_size),
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    train_dataset = datasets.ImageFolder(args.dataset, transform)
    train_loader = DataLoader(train_dataset, batch_size=args.batch_size)

    transformer = TransformerNet().to(device)
    optimizer = Adam(transformer.parameters(), args.lr)
    mse_loss = torch.nn.MSELoss()

    vgg = Vgg16(requires_grad=False).to(device)
    style_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    style = utils.load_image(args.style_image, size=args.style_size)
    style = style_transform(style)
    style = style.repeat(args.batch_size, 1, 1, 1).to(device)

    features_style = vgg(utils.normalize_batch(style))
    gram_style = [utils.gram_matrix(y) for y in features_style]

    for e in range(args.epochs):
        transformer.train()
        agg_content_loss = 0.
        agg_style_loss = 0.
        count = 0
        for batch_id, (x, _) in enumerate(train_loader):
            n_batch = len(x)
            count += n_batch
            optimizer.zero_grad()

            x = x.to(device)
            y = transformer(x)

            y = utils.normalize_batch(y)
            x = utils.normalize_batch(x)

            features_y = vgg(y)
            features_x = vgg(x)

            content_loss = args.content_weight * mse_loss(features_y.relu2_2, features_x.relu2_2)

            style_loss = 0.
            for ft_y, gm_s in zip(features_y, gram_style):
                gm_y = utils.gram_matrix(ft_y)
                style_loss += mse_loss(gm_y, gm_s[:n_batch, :, :])
            style_loss *= args.style_weight

            total_loss = content_loss + style_loss
            total_loss.backward()
            optimizer.step()

            agg_content_loss += content_loss.item()
            agg_style_loss += style_loss.item()

            if (batch_id + 1) % args.log_interval == 0:
                mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\ttotal: {:.6f}".format(
                    time.ctime(), e + 1, count, len(train_dataset),
                    agg_content_loss / (batch_id + 1),
                    agg_style_loss / (batch_id + 1),
                    (agg_content_loss + agg_style_loss) / (batch_id + 1)
                )
                print(mesg)

            if args.checkpoint_model_dir is not None and (batch_id + 1) % args.checkpoint_interval == 0:
                transformer.eval().cpu()
                ckpt_model_filename = "ckpt_epoch_" + str(e) + "_batch_id_" + str(batch_id + 1) + ".pth"
                ckpt_model_path = os.path.join(args.checkpoint_model_dir, ckpt_model_filename)
                torch.save(transformer.state_dict(), ckpt_model_path)
                transformer.to(device).train()

    # save model
    transformer.eval().cpu()
    save_model_filename = "epoch_" + str(args.epochs) + "_" + str(time.ctime()).replace(' ', '_') + "_" + str(
        args.content_weight) + "_" + str(args.style_weight) + ".model"
    save_model_path = os.path.join(args.save_model_dir, save_model_filename)
    torch.save(transformer.state_dict(), save_model_path)

    print("\nDone, trained model saved at", save_model_path)
Example #34
def train(**kwargs):
    opt = Config()
    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)
    
    device = t.device('cuda') if opt.use_gpu else t.device('cpu')
    vis = utils.Visualizer(opt.env)

    # data loading
    transform = tv.transforms.Compose([
        tv.transforms.Resize(opt.image_size),
        tv.transforms.CenterCrop(opt.image_size),
        tv.transforms.ToTensor(),
        tv.transforms.Lambda(lambda x: x * 255)
    ])
    dataset = tv.datasets.ImageFolder(opt.data_root, transform)
    dataloader = data.DataLoader(dataset, opt.batch_size)

    # transformer network
    transformer = TransformerNet()
    if opt.model_path:
        transformer.load_state_dict(t.load(opt.model_path, map_location=lambda _s, _: _s))
    transformer.to(device)

    # loss network (Vgg16)
    vgg = Vgg16().eval()
    vgg.to(device)
    for param in vgg.parameters():
        param.requires_grad = False

    # optimizer
    optimizer = t.optim.Adam(transformer.parameters(), opt.lr)

    # load the style image
    style = utils.get_style_data(opt.style_path)
    vis.img('style', (style.data[0] * 0.225 + 0.45).clamp(min=0, max=1))
    style = style.to(device)


    # Gram matrices of the style image
    with t.no_grad():
        features_style = vgg(style)
        gram_style = [utils.gram_matrix(y) for y in features_style]

    # loss statistics
    style_meter = tnt.meter.AverageValueMeter()
    content_meter = tnt.meter.AverageValueMeter()

    for epoch in range(opt.epoches):
        content_meter.reset()
        style_meter.reset()

        for ii, (x, _) in tqdm.tqdm(enumerate(dataloader)):

            # training step
            optimizer.zero_grad()
            x = x.to(device)
            y = transformer(x)
            y = utils.normalize_batch(y)
            x = utils.normalize_batch(x)
            features_y = vgg(y)
            features_x = vgg(x)

            # content loss
            content_loss = opt.content_weight * F.mse_loss(features_y.relu2_2, features_x.relu2_2)

            # style loss
            style_loss = 0.
            for ft_y, gm_s in zip(features_y, gram_style):
                gram_y = utils.gram_matrix(ft_y)
                style_loss += F.mse_loss(gram_y, gm_s.expand_as(gram_y))
            style_loss *= opt.style_weight

            total_loss = content_loss + style_loss
            total_loss.backward()
            optimizer.step()

            # smoothed losses
            content_meter.add(content_loss.item())
            style_meter.add(style_loss.item())

            if (ii + 1) % opt.plot_every == 0:
                if os.path.exists(opt.debug_file):
                    ipdb.set_trace()

                # visualization
                vis.plot('content_loss', content_meter.value()[0])
                vis.plot('style_loss', style_meter.value()[0])
                # x and y were normalized (utils.normalize_batch), so denormalize them for display
                vis.img('output', (y.data.cpu()[0] * 0.225 + 0.45).clamp(min=0, max=1))
                vis.img('input', (x.data.cpu()[0] * 0.225 + 0.45).clamp(min=0, max=1))

        # save visdom state and the model checkpoint
        vis.save([opt.env])
        t.save(transformer.state_dict(), 'checkpoints/%s_style.pth' % epoch)