コード例 #1
0
ファイル: neural_style.py プロジェクト: nhvinh118/pset-4-5
def stylize(args):
    """Apply a trained style-transfer model to a single content image.

    Loads the content image, normalizes it to the 0-255 tensor range the
    model expects, then either runs an ONNX model via the Caffe2 backend
    or a PyTorch TransformerNet checkpoint, optionally exporting the
    latter to ONNX. The stylized image is written to args.output_image.
    """
    device = torch.device("cuda" if args.cuda else "cpu")

    # Preprocess: PIL image -> float tensor scaled to [0, 255] with a
    # leading batch dimension, on the target device.
    img = utils.load_image(args.content_image, scale=args.content_scale)
    preprocess = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda t: t.mul(255)),
    ])
    batch = preprocess(img).unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        # ONNX path: delegate inference to the Caffe2 runner.
        output = stylize_onnx_caffe2(batch, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # Older checkpoints carry deprecated InstanceNorm running
            # statistics; strip them before loading the weights.
            for key in list(state_dict):
                if re.search(r'in\d+\.running_(mean|var)$', key):
                    del state_dict[key]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(
                    ".onnx"), "Export model file should end with .onnx"
                # _export both writes the ONNX file and returns the traced
                # output, so the stylized image is still saved below.
                output = torch.onnx._export(style_model, batch,
                                            args.export_onnx).cpu()
            else:
                output = style_model(batch).cpu()
    utils.save_image(args.output_image, output[0])
コード例 #2
0
ファイル: ai_server.py プロジェクト: tyhtm3/Photory-AI
def tale():
    """Flask handler: render each story image through all six styles.

    Reads ``story_pk`` and ``imagePaths`` from the request JSON, fetches
    every image from the media server, stylizes it with each of the six
    checkpoints in ``STYLE_PATH`` (saving results under ``static/``),
    then notifies the backend with a placeholder tale.
    """
    payload = json.loads(request.get_data(), encoding='utf-8')
    story_pk = payload['story_pk']
    image_paths = payload['imagePaths']

    for cnt, rel_path in enumerate(image_paths):
        # Fetch the source image from the media server as a numpy array.
        response = requests.get(IMAGE_SERVER_URL + '/media/' + rel_path)
        image = np.asarray(Image.open(BytesIO(response.content)))

        # One fresh TF graph per style keeps the sessions isolated.
        for style_idx in range(6):
            with tf.Graph().as_default():
                tester = style_transfer_tester.StyleTransferTester(
                    image,
                    'neural_style/fast_neural_style/' + STYLE_PATH[style_idx]
                )
                styled = tester.test()

            result_path = (str(story_pk) + '_' + str(cnt) + '_'
                           + str(style_idx) + '.jpg')
            utils.save_image(styled, 'static/' + result_path)

    params = {
        'story_pk': story_pk,
        'tale': ['아직', '준비중인', '컨텐츠', '입니다', 'ㅎㅎ']
    }

    # Tell the backend the story can be edited again.
    requests.post('http://127.0.0.1:8000/storys/editable/', data=params)
    return 'ok'
コード例 #3
0
ファイル: app.py プロジェクト: skai2/FastNeuralStyle
def stylize(input_filepath, output_filepath, checkpoint, content_scale=None):
    """Stylize one image with a trained TransformerNet checkpoint.

    Args:
        input_filepath: Path of the content image to read.
        output_filepath: Path where the stylized image is written.
        checkpoint: Path of the saved TransformerNet ``state_dict``.
        content_scale: Optional downscale factor for the content image.
    """
    content_image = utils.load_image(input_filepath, scale=content_scale)
    content_transform = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Lambda(lambda x: x.mul(255))])  # model expects 0-255 range
    content_image = content_transform(content_image).unsqueeze(0)
    if CUDA:
        content_image = content_image.cuda()

    style_model = TransformerNet()
    style_model.load_state_dict(torch.load(checkpoint))
    if CUDA:
        style_model.cuda()

    # torch.no_grad() replaces the removed Variable(..., volatile=True)
    # API; both disable autograd bookkeeping for pure inference.
    with torch.no_grad():
        output = style_model(content_image)
    if CUDA:
        output = output.cpu()
    utils.save_image(output_filepath, output[0])
コード例 #4
0
ファイル: ai_server.py プロジェクト: tyhtm3/Photory-AI
def style():
    """Flask handler: stylize the posted image and return a caption.

    Reads the image URL from the request JSON, runs the image through
    the wave and udnie style networks five times each (saving every
    result under ``static/``), then generates and returns a caption.
    """
    # Get image url from json
    payload = json.loads(request.get_data(), encoding='utf-8')
    image_url = payload['url']

    # Image load from url, kept as a numpy array for the style networks.
    response = requests.get(image_url)
    image = np.asarray(Image.open(BytesIO(response.content)))

    # The caption model needs a local file path, so mirror the image
    # into the Keras cache under its original extension.
    img_extension = image_url[-4:]
    img_extension_path = tf.keras.utils.get_file('image' + img_extension,
                                                 origin=image_url)

    result_path = 'asdf3.jpg'
    for i in range(1, 6):
        # Run both style checkpoints; one fresh TF graph per run keeps
        # the sessions isolated.
        for suffix, ckpt in (('_1', 'wave.ckpt'), ('_2', 'udnie.ckpt')):
            with tf.Graph().as_default():
                tester = style_transfer_tester.StyleTransferTester(
                    image, 'neural_style/fast_neural_style/' + ckpt
                )
                output = tester.test()

            # save result
            utils.save_image(output,
                             'static/' + str(i) + suffix + result_path)

    with tf.Graph().as_default():
        caption_model = Image_caption()
        result_cap, plot = caption_model(img_extension_path)
    return result_cap
コード例 #5
0

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

## put your original images in this directory
input_dir = Path('./inputs')

# Preprocessing shared by every image: PIL -> float tensor scaled to
# the [0, 255] range the model expects.
preprocess = transforms.Compose([
    transforms.ToTensor(),
    transforms.Lambda(lambda t: t.mul(255)),
])

with torch.no_grad():
    # Build the model once; strip the deprecated InstanceNorm running
    # statistics that older checkpoints still carry.
    style_model = TransformerNet()
    state_dict = torch.load('nausicaa3.model')
    for key in list(state_dict):
        if re.search(r'in\d+\.running_(mean|var)$', key):
            del state_dict[key]
    style_model.load_state_dict(state_dict)
    style_model.to(device)

    for imagefile in input_dir.glob('*.jpg'):
        image = utils.load_image(imagefile, scale=None)
        batch = preprocess(image).unsqueeze(0).to(device)

        styled = style_model(batch).cpu()

        # styled images will be saved to this directory
        utils.save_image(f'./outputs/{imagefile.name}', styled[0])