Example #1
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/cpu:0', bw=False):

    img_shape = get_img(data_in).shape

    g = tf.Graph()
    # soft_config was undefined in this snippet; defined here as in the sibling examples.
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True

    with g.as_default(), g.device(device_t), tf.Session(config=soft_config) as sess:
        batch_shape = (1,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)

        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)

        if bw:
            img = get_bw_img(data_in)
        else:
            img = get_img(data_in)

        X[0] = img

        _preds = sess.run(preds, feed_dict={img_placeholder:X})
        save_img(paths_out, _preds[0])
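These snippets all rely on get_img and save_img helpers that are not shown on this page. A minimal sketch consistent with how they are called, assuming PIL and NumPy (the real utils module in each repo may differ):

# Hypothetical helpers inferred from the call sites above; not the original utils.
import numpy as np
from PIL import Image

def get_img(src, img_size=None):
    # Load an image as an HxWx3 float32 array; optionally resize to (H, W, C).
    img = Image.open(src).convert('RGB')
    if img_size is not None:
        img = img.resize((img_size[1], img_size[0]))  # PIL expects (W, H)
    return np.asarray(img, dtype=np.float32)

def save_img(out_path, img):
    # Clip to the displayable range and write to disk.
    Image.fromarray(np.clip(img, 0, 255).astype(np.uint8)).save(out_path)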
Example #2
def style_transfer(style, in_path, out_path, device_t='/gpu:0', batch_size=1):
    print("Log: Begin - style transfer")
    curr_style = STYLE_MODEL_MAP[style]

    img_shape = get_img(in_path).shape
    graph = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with graph.as_default(), graph.device(device_t), tf.Session(config=soft_config) as sess:
        img_val = tf.placeholder(tf.float32, shape=(1,) + img_shape, name='img_val')
        preds = transform.net(img_val)
        saver = tf.train.Saver()
        if os.path.isdir(curr_style):
            ckpt = tf.train.get_checkpoint_state(curr_style)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, curr_style)

        img = get_img(in_path)
        _preds = sess.run(preds, feed_dict={img_val:[img]})
        save_img(out_path, _preds[0])
    print("Log: End - style transfer - Success√")
    return _preds[0]
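Once STYLE_MODEL_MAP points at the available checkpoints, usage is a single call. A hypothetical example (the mapping key and paths are assumptions):

# Hypothetical mapping and call; keys depend on the checkpoints you have.
STYLE_MODEL_MAP = {'wave': 'style/wave.ckpt'}
stylized = style_transfer('wave', 'in/chicago.jpg', 'out/chicago_wave.jpg')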
Example #3
def main():
    start = time.time()
    if len(args.style_paths) > 2:
        raise Exception('Maximum number of styles should be 2')

    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints,
                    relu_targets=args.relu_targets,
                    vgg_path=args.vgg_path,
                    device=args.device,
                    ss_patch_size=args.ss_patch_size,
                    ss_stride=args.ss_stride,
                    alpha=args.alpha,
                    beta=args.beta)

    os.makedirs(args.out_path, exist_ok=True)

    content_img = get_img(args.content_path, args.content_size)
    styles = [get_img(path, args.style_size) for path in args.style_paths]

    _, content_ext = os.path.splitext(args.content_path)
    output_filename = os.path.join(
        args.out_path, "result.jpg"
    )  # previously: f'{args.content_path}_{args.style_path_a}.{content_ext}'
    output = stylize_output(wct_model, content_img, styles)
    save_img(output_filename, output)

    print("Finished stylizing in {}s".format(time.time() - start))
Example #4
def main():
    batch_size = 1
    checkpoint_dir = 'style/wave.ckpt'
    device_t = '/gpu:0'
    data_in = 'in/chicago.jpg'
    path_out = 'out/chicago.jpg'
    img_shape = get_img(data_in).shape

    g = tf.Graph()
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')
        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        X = np.zeros(batch_shape, dtype=np.float32)
        img = get_img(data_in)
        X[0] = img
        _preds = sess.run(preds, feed_dict={img_placeholder: X})
        save_img(path_out, _preds[0])
Example #5
 def fill(self):
     self.canv.create_rectangle(*self.hero.pos,
                                self.hero.pos[0] + 30,
                                self.hero.pos[1] + 30,
                                fill='red',
                                outline='red')
     get_img(self.canv, self.hero.pos)
Example #6
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):

    assert len(paths_out) > 0
    is_paths = type(data_in[0]) == str
    if is_paths:
        assert len(data_in) == len(paths_out)
        img_shape = get_img(data_in[0]).shape
    else:
        assert data_in.shape[0] == len(paths_out)
        img_shape = data_in[0].shape

    g = tf.Graph()

    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = int(len(paths_out)/batch_size)
        print(str(num_iters))
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos+batch_size]
            if is_paths:
                curr_batch_in = data_in[pos:pos+batch_size]
                X = np.zeros(batch_shape, dtype=np.float32)
                for j, path_in in enumerate(curr_batch_in):
                    img = get_img(path_in)
                    assert img.shape == img_shape, \
                        'Images have different dimensions. ' +  \
                        'Resize images or use --allow-different-dimensions.'
                    X[j] = img
            else:
                X = data_in[pos:pos+batch_size]

            _preds = sess.run(preds, feed_dict={img_placeholder:X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])
                
        remaining_in = data_in[num_iters*batch_size:]
        remaining_out = paths_out[num_iters*batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in, remaining_out, checkpoint_dir, 
            device_t=device_t, batch_size=1)
Example #7
def ffwd(data_in, paths_out, checkpoint_dir, device_t='/gpu:0', batch_size=4):
    assert len(paths_out) > 0
    is_paths = type(data_in[0]) == str
    if is_paths:
        assert len(data_in) == len(paths_out)
        img_shape = get_img(data_in[0]).shape
    else:
        assert data_in.shape[0] == len(paths_out)
        img_shape = data_in[0].shape

    g = tf.Graph()
    batch_size = min(len(paths_out), batch_size)
    curr_num = 0
    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), \
            tf.Session(config=soft_config) as sess:
        batch_shape = (batch_size,) + img_shape
        img_placeholder = tf.placeholder(tf.float32, shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()
        if os.path.isdir(checkpoint_dir):
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
            else:
                raise Exception("No checkpoint found...")
        else:
            saver.restore(sess, checkpoint_dir)

        num_iters = int(len(paths_out)/batch_size)
        for i in range(num_iters):
            pos = i * batch_size
            curr_batch_out = paths_out[pos:pos+batch_size]
            if is_paths:
                curr_batch_in = data_in[pos:pos+batch_size]
                X = np.zeros(batch_shape, dtype=np.float32)
                for j, path_in in enumerate(curr_batch_in):
                    img = get_img(path_in)
                    assert img.shape == img_shape, \
                        'Images have different dimensions. ' +  \
                        'Resize images or use --allow-different-dimensions.'
                    X[j] = img
            else:
                X = data_in[pos:pos+batch_size]

            _preds = sess.run(preds, feed_dict={img_placeholder:X})
            for j, path_out in enumerate(curr_batch_out):
                save_img(path_out, _preds[j])
                
        remaining_in = data_in[num_iters*batch_size:]
        remaining_out = paths_out[num_iters*batch_size:]
    if len(remaining_in) > 0:
        ffwd(remaining_in, remaining_out, checkpoint_dir, 
            device_t=device_t, batch_size=1)
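The loop above runs floor(len(paths_out) / batch_size) full batches, then recurses with batch_size=1 on whatever is left. A quick sanity check of that split with plain lists:

# Full batches plus the remainder cover every path exactly once.
paths = list(range(10))                 # stand-ins for 10 output paths
batch_size = 4
num_iters = len(paths) // batch_size    # 2 full batches: paths 0..7
batched = [p for i in range(num_iters)
           for p in paths[i * batch_size:(i + 1) * batch_size]]
remaining = paths[num_iters * batch_size:]   # [8, 9], re-run with batch_size=1
assert batched + remaining == paths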
Example #8
def ffwd(data_in, paths_out, model, device_t='', batch_size=1):
    assert len(paths_out) > 0
    is_paths = type(data_in[0]) == str
    # TODO: if data_in holds paths to the input images (is_paths is True), read the first image;
    # since the pb model's input is 1 x 256 x 256 x 3, resize the input to 256 x 256 and assign its shape to img_shape.
    # If data_in already holds decoded image arrays (is_paths is False), take img_shape directly from the data.
    if is_paths:
        # Get the shape when loading the first image.
        img_shape = get_img(data_in[0], (256, 256, 3)).shape
    else:
        # Get the shape from data_in[0] when the images have been loaded.
        img_shape = data_in[0].shape

    g = tf.Graph()
    config = tf.ConfigProto(allow_soft_placement=True,
                            inter_op_parallelism_threads=1,
                            intra_op_parallelism_threads=1)
    with g.as_default():
        with tf.gfile.FastGFile(model, 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            tf.import_graph_def(graph_def, name='')

        with tf.Session(config=config) as sess:
            sess.run(tf.global_variables_initializer())
            input_tensor = sess.graph.get_tensor_by_name('X_content:0')
            output_tensor = sess.graph.get_tensor_by_name('add_37:0')
            batch_size = 1
            # TODO: the loaded images are in HWC format; they still need to be batched into NHWC
            batch_shape = (batch_size, ) + img_shape
            num_iters = int(len(paths_out) / batch_size)
            for i in range(num_iters):
                pos = i * batch_size
                curr_batch_out = paths_out[pos:pos + batch_size]
                # TODO: if data_in holds paths to the input images, read the batch_size images for this batch into the array X;
                # if data_in already holds decoded image arrays, assign that slice to X.
                if is_paths:
                    curr_batch_in = data_in[pos:pos + batch_size]
                    X = np.zeros(batch_shape)
                    #print(X.shape)
                    for j, path in enumerate(curr_batch_in):
                        img = get_img(path, img_shape)
                        X[j] = img
                else:
                    X = data_in[pos:pos + batch_size]

                start = time.time()
                # TODO: evaluate output_tensor with sess.run
                _preds = sess.run(output_tensor, feed_dict={input_tensor: X})
                end = time.time()
                for j, path_out in enumerate(curr_batch_out):
                    # TODO: for this batch, save every stylized image via save_img() from utils.py
                    save_img(path_out, _preds[j])
                delta_time = end - start
                print("Inference (CPU) processing time: %s" % delta_time)
Example #9
 def __init__(self,
              content_path='Data/cornell.jpg',
              style_path='Data/starry-night.jpg'):
     self.content = get_img(content_path)
     self.style = get_img(style_path)
     self.use_cuda = torch.cuda.is_available()
     if self.use_cuda:
         self.content = self.content.cuda()
         self.style = self.style.cuda()
     vgg = get_truncated_vgg()
     self.style_activations = vgg(self.style)
     self.loss_fn = NSTLoss(self.style_activations, vgg)
Example #10
def search_product(url: str, category: str = 'none') -> dict:
    """
    Return a product for a given url
    """
    prod_page = requests.get(url)
    prod_soup = BeautifulSoup(prod_page.content, "html.parser")
    # Title, description, image URL
    prod_title = prod_soup.h1.string
    if len(prod_soup.select("div#product_description + p")) > 0:
        prod_desc = prod_soup.select("div#product_description + p")[0].string
    else:
        prod_desc = "No Description"
    prod_img_url = f"{URL}/" + prod_soup.img["src"][6:]

    # UPC, price, count, category
    prod_info_tab = map(lambda tag: tag.string, prod_soup("td"))
    (
        prod_upc,
        prod_category,
        prod_price_excl,
        prod_price_incl,
        _,
        prod_count,
        _,
    ) = prod_info_tab

    if settings.download_image_option:
        if category == 'none':
            filename = prod_upc
            get_img(prod_img_url, filename.lower())
        else:
            filename = f"{category}_{prod_upc}"
            get_img(prod_img_url, filename.lower(), category)

    # Format Available
    prod_count = "".join(filter(str.isdigit, prod_count))

    # Rating
    prod_rating = format_rating(prod_soup.find("p", class_="star-rating")["class"][-1])
    return {
        "page_url": url,
        "title": prod_title,
        "desc": prod_desc,
        "img_url": prod_img_url,
        "upc": prod_upc,
        "price_excl": prod_price_excl,
        "price_incl": prod_price_incl,
        "available": prod_count,
        "category": prod_category,
        "rating": prod_rating,
    }
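The selectors here (div#product_description, the star-rating class, the td info table) match the books.toscrape.com layout, which this scraper appears to target. A hypothetical call:

# Hypothetical usage; URL shape follows the site this scraper appears to target.
product = search_product(
    'http://books.toscrape.com/catalogue/a-light-in-the-attic_1000/index.html')
print(product['title'], product['price_incl'], product['rating'])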
Example #11
def get_stylize_image(content_fullpath, style_fullpath, output_path,
                      content_size=256, style_size=256, alpha=0.6,
                      swap5=False, ss_alpha=0.6, adain=False):
    content_img = get_img(content_fullpath)
    content_img = resize_to(content_img, int(content_size))

    style_img = get_img(style_fullpath)
    style_img = resize_to(style_img,int(style_size))

    stylized_rgb = wct_model.predict(
        content_img, style_img, alpha, swap5, ss_alpha, adain)

    save_img(output_path, stylized_rgb)
    print("stylized image saved "+output_path)
Example #12
def forward_prop(data_in,
                 paths_out,
                 checkpoint_dir,
                 device_t='/cpu:0',
                 batch_size=1):
    assert len(paths_out) > 0
    is_paths = type(data_in) == str

    img_shape = get_img(data_in).shape

    #print("Batch size: ", batch_size)

    g = tf.Graph()

    soft_config = tf.ConfigProto(allow_soft_placement=True)
    soft_config.gpu_options.allow_growth = True
    with g.as_default(), g.device(device_t), tf.Session(
            config=soft_config) as sess:
        batch_shape = (batch_size, ) + img_shape
        #print("Batch_shape: ", batch_shape)
        img_placeholder = tf.placeholder(tf.float32,
                                         shape=batch_shape,
                                         name='img_placeholder')

        preds = transform.net(img_placeholder)
        saver = tf.train.Saver()

        #Restore checkpoint in session
        saver.restore(sess, checkpoint_dir)

        curr_batch_out = paths_out

        if is_paths:
            curr_batch_in = data_in
            print("curr_batch_in: ", curr_batch_in)
            print("curr_batch_out: ", curr_batch_out)
            X = np.zeros(batch_shape, dtype=np.float32)

            img = get_img(curr_batch_in)
            assert img.shape == img_shape, 'Images have different dimensions. ' + 'Resize images'
            X[0] = img
            #print("Shape: ", X.shape)  #(1,960,960,3)

        _preds = sess.run(preds, feed_dict={img_placeholder: X})

        save_img(curr_batch_out, _preds[0])
    print("Done!!")
    return curr_batch_out
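A hypothetical call, reusing the checkpoint and image paths from Example #4 (single image in, single image out):

# Hypothetical usage; paths and checkpoint as in Example #4.
out = forward_prop('in/chicago.jpg', 'out/chicago.jpg', 'style/wave.ckpt')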
Example #13
 def click(self, event):
     self.loc_pos.append(event.x)
     self.loc_pos.append(event.y)
     if len(self.loc_pos) == 4:
         if self.is_Create_mode:
             self.create_wall(self.loc_pos)
             self.loc_pos = []
     if self.is_repl_mode:
         ex, ey = event.x, event.y
         ex += 30 - ex % 30 if ex % 30 > 15 else -(ex % 30)
         ey += 30 - ey % 30 if ey % 30 > 15 else -(ey % 30)
         self.hero.pos = ex, ey
         self.hero.first_pos = ex, ey
         get_img(self.canv, self.hero.pos)
     if len(self.loc_pos) > 4: self.loc_pos = []
Example #14
def ffwd_different_dimensions(
        in_path,
        out_path,
        checkpoint_dir,
        device_t=DEVICE,
        batch_size=4,
        data_format='NHWC',
        num_base_channels=32  # more cli params
):
    in_path_of_shape = defaultdict(list)
    out_path_of_shape = defaultdict(list)
    for i in range(len(in_path)):
        in_image = in_path[i]
        out_image = out_path[i]
        shape = "%dx%dx%d" % get_img(in_image).shape
        in_path_of_shape[shape].append(in_image)
        out_path_of_shape[shape].append(out_image)
    for shape in in_path_of_shape:
        print('Processing images of shape %s' % shape)
        ffwd(in_path_of_shape[shape],
             out_path_of_shape[shape],
             checkpoint_dir,
             device_t,
             batch_size,
             data_format=data_format,
             num_base_channels=num_base_channels)
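The defaultdict bucketing is the core idea: group paths by image shape so each ffwd() call sees a uniform batch. The idiom in isolation, with made-up shapes:

# Illustration only: bucket file paths by their image shape.
from collections import defaultdict

shapes = {'a.jpg': (256, 256, 3), 'b.jpg': (512, 512, 3), 'c.jpg': (256, 256, 3)}
buckets = defaultdict(list)
for path, shape in shapes.items():
    buckets['%dx%dx%d' % shape].append(path)
# buckets == {'256x256x3': ['a.jpg', 'c.jpg'], '512x512x3': ['b.jpg']}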
Example #15
def ffwd_different_dimensions(in_path,
                              out_path,
                              checkpoint_dir,
                              device_t=DEVICE,
                              batch_size=4,
                              testing=False):

    if testing:
        print('start ffwd_different_dimensions')
        print(' in_path: ', in_path)
        print(' out_path: ', out_path)
        print(' checkpoint_dir: ', checkpoint_dir)
        print(' device_t: ', device_t)
        print(' batch_size: ', batch_size)

    in_path_of_shape = defaultdict(list)
    out_path_of_shape = defaultdict(list)
    for i in range(len(in_path)):
        in_image = in_path[i]
        out_image = out_path[i]
        shape = "%dx%dx%d" % get_img(in_image).shape
        in_path_of_shape[shape].append(in_image)
        out_path_of_shape[shape].append(out_image)
    for shape in in_path_of_shape:
        print('Processing images of shape %s' % shape)
        ffwd(in_path_of_shape[shape], out_path_of_shape[shape], checkpoint_dir,
             device_t, batch_size)
Example #16
def main():
    ### Note the reference here: options actually comes from argparse.parse_args()!
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    ###

    ### Here our own get_img pulls the style image out of the arguments!
    style_target = get_img(options.style)
    ###

    ### Process the incoming options!
    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate
    }
    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    # content_targets was never defined in this excerpt; the sibling examples
    # load it from the training directory, so that line is restored here.
    content_targets = _get_files(options.train_path)
    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]
    ###

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
Example #17
    def __init__(self, master):
        self.master = master
        self.frame = tk.Frame(self.master)
        self.button1 = tk.Button(self.frame,
                                 text='edit code',
                                 width=25,
                                 command=self.new_window)
        self.button2 = tk.Button(self.frame,
                                 text='place walls',
                                 width=25,
                                 command=self.get_diff_mode)
        self.button3 = Button(self.frame,
                              text='reset',
                              width=25,
                              command=self.from_start)
        self.button4 = Button(self.frame,
                              text='move',
                              width=25,
                              command=self.get_repl_mode)

        self.button1.pack()
        self.button2.pack()
        self.button4.pack()
        self.frame.pack()
        self.conf_canvas()
        self.button3.pack()
        hero = get_img(self.canv, pos=(300, 420))
        self.hero = Hero(self.canv, hero)
        self.cons = Console(self.hero)
        self.cons.txt.pack(side='bottom')
        self.hero.console = self.cons
Example #18
def post_params():
    myid = str(int(time.time() * 10000)) + ".jpg"
    tmp_img_post_path = RESULT_IMG_PATH + myid

    style_img_url = STYLE_IMG_PATH + request.form['style_img_url']
    content_img_url = CONTENT_IMG_PATH + request.form['content_img_url']
    alpha = float(request.form['alpha'])
    content_img = get_img(content_img_url)
    style_size = int(float(request.form['style_scale']) * 512)
    keep_colors = False

    # style_img = get_img_crop(style_img_url, resize=style_size)
    style_img = get_img_crop(style_img_url)

    if style_size > 0:
        style_img = resize_to(style_img, style_size)

    if keep_colors:
        style_img = preserve_colors_np(style_img, content_img)

    # Run the frame through the style network
    stylized_rgb = wct_model.predict(content_img, style_img, alpha, False, 0.6,
                                     False)

    save_img(tmp_img_post_path, stylized_rgb)
    return Response(myid, status=200, mimetype='application/json')
Example #19
def save_vectors_file():
    data = load_data(FLAGS.data_path)

    vectorizer = Vectorizer()

    logging.info('getting vectors')
    img_vectors = []
    genders = []
    for img_path, gender_id in tqdm(data.items()):
        try:
            img_array = get_img(img_path)

            vector = vectorizer.get_vector(img_array)

            img_vectors.append(vector)
            genders.append(gender_id)
        except Exception as e:
            logging.warning('exception: {}'.format(e))

    vectorizer.close()

    dim_reduction_technique = get_dim_reduction_technique(
        FLAGS.dim_reduction_technique)

    reduced, model = dim_reduction_technique(img_vectors, FLAGS.n_dimensions)

    save_pkl_file(model, FLAGS.reducter_path)
    save_pkl_file((reduced, genders), FLAGS.vectors_path)
Example #20
def ffwd_different_dimensions(in_path,
                              out_path,
                              checkpoint_dir,
                              device_t=DEVICE,
                              batch_size=4):
    # ffwd_different_dimensions() runs ffwd over images whose dimensions differ.
    in_path_of_shape = defaultdict(list)
    # in_path_of_shape is a defaultdict of lists
    out_path_of_shape = defaultdict(list)
    # out_path_of_shape is likewise a defaultdict of lists
    # Both are initialized with defaultdict so that appending to a key that
    # has not been declared yet still works correctly
    for i in range(len(in_path)):
        # loop once for every path stored in in_path
        in_image = in_path[i]
        # in_image gets the i-th element of in_path
        out_image = out_path[i]
        # out_image gets the i-th element of out_path
        shape = "%dx%dx%d" % get_img(in_image).shape
        # describe the image's shape as a string and store it in shape
        in_path_of_shape[shape].append(in_image)
        # append the i-th element of in_path under the shape key of in_path_of_shape
        out_path_of_shape[shape].append(out_image)
        # append the i-th element of out_path under the shape key of out_path_of_shape
    for shape in in_path_of_shape:
        # loop once for every image shape collected in in_path_of_shape
        print('Processing images of shape %s' % shape)
        # report on the console that images of this shape are being processed
        ffwd(in_path_of_shape[shape], out_path_of_shape[shape], checkpoint_dir,
             device_t, batch_size)
Example #21
def ffwd_different_dimensions(in_path,
                              out_path,
                              checkpoint_dir,
                              device_t=DEVICE,
                              batch_size=BATCH_SIZE):
    ''' Feed-forward over images of different dimensions '''

    in_path_of_shape = defaultdict(list)
    out_path_of_shape = defaultdict(list)

    for i in range(len(in_path)):
        in_image = in_path[i]
        out_image = out_path[i]

        shape = '%dx%dx%d' % get_img(in_image).shape

        in_path_of_shape[shape].append(in_image)
        out_path_of_shape[shape].append(out_image)

    for shape in in_path_of_shape:
        # process all the image of same dimensions
        print('Processing images of shape %s ' % shape)

        ffwd_img(in_path_of_shape[shape], out_path_of_shape[shape],
                 checkpoint_dir, device_t, batch_size)
Example #22
    def __init__(self, *args, **kwargs) -> None:
        super().__init__(*args, **kwargs)
        self.n = 1000
        self.butterfly = utils.get_img(path.join('example', 'butterfly.png'))
        # Batching
        self.butterfly = self.butterfly.repeat(16, 1, 1, 1)
        self.m = torch.Tensor([
            [3.2, 0.016, -68],
            [1.23, 1.7, -54],
            [0.008, 0.0001, 1],
        ])
        if cuda.is_available():
            self.butterfly = self.butterfly.cuda()
            self.m = self.m.cuda()

            with utils.Timer('Warm-up: {}'):
                for _ in range(100):
                    _ = core_warp.warp(
                        self.butterfly,
                        self.m,
                        sizes='auto',
                        kernel='bicubic',
                        fill_value=0,
                    )

                cuda.synchronize()
Example #23
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    content_targets = list_files(options.train_path)
    kwargs = {
            "epochs":options.epochs,
            "print_iterations":options.checkpoint_iterations,
            "batch_size":options.batch_size,
            "checkpoint_dir":os.path.join(options.checkpoint_dir,'fns.ckpt'),
            "summary_dir":options.summary_dir,
            "learning_rate":options.learning_rate
            }
    args = [
            content_targets,
            style_target,
            options.content_weight,
            options.style_weight,
            options.tv_weight,
            options.vgg_path
            ]
    start_time = time.time()
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        print('{0} ---------- Epoch: {1}, Iteration: {2}----------'.format(time.ctime(), epoch, i))
        print('Total loss: {0}, Style loss: {1}, Content loss: {2}, TV loss: {3}'
                .format(loss, style_loss, content_loss, tv_loss))
    print("Training complete! Total training time is {0} s".format(time.time() - start_time))
Example #24
    def train(self):
        """
        Method for training the network
        """
        global_step = tf.compat.v1.train.get_or_create_global_step()

        trainable_variables = tf.compat.v1.trainable_variables()
        grads = tf.gradients(self.L_total, trainable_variables)

        optimizer = tf.compat.v1.train.AdamOptimizer(self.learn_rate)
        train_op = optimizer.apply_gradients(zip(grads, trainable_variables),
                                             global_step=global_step,
                                             name="train_step")

        self.sess.run(tf.compat.v1.global_variables_initializer())

        saver = tf.compat.v1.train.Saver()

        num_examples = len(self.x_list)

        epoch = 0
        iterations = 0

        while epoch < self.num_epochs:
            while iterations * self.batch_size < num_examples:

                curr = iterations * self.batch_size
                step = curr + self.batch_size
                x_batch = np.zeros(self.batch_shape, dtype=np.float32)
                for j, img_p in enumerate(self.x_list[curr:step]):
                    x_batch[j] = utils.get_img(img_p, (256, 256, 3)).astype(
                        np.float32)

                iterations += 1

                _, L_total, L_content, L_style, L_tv, step = self.sess.run(
                    [
                        train_op,
                        self.L_total,
                        self.L_content,
                        self.L_style,
                        self.L_tv,
                        global_step,
                    ],
                    feed_dict={
                        self.y_c: x_batch,
                        self.y_s: self.y_s0
                    },
                )

                print(f"epoch : {epoch}, iter : {step}, ")
                print(
                    f"L_total : {L_total}, L_content : {L_content}, L_style : {L_style}, L_tv : {L_tv}"
                )

            epoch += 1
            iterations = 0
        print("Saving final model...")
        _ = saver.save(self.sess, self.save_path + "/final.ckpt")
Example #25
def main():
    print('ml5.js Style Transfer Training!')
    print('Note: This training will take a couple of hours.')
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow":options.slow,
        "epochs":options.epochs,
        "print_iterations":options.checkpoint_iterations,
        "batch_size":options.batch_size,
        "save_path":os.path.join(options.checkpoint_dir,'fns.ckpt'),
        "learning_rate":options.learning_rate,
    }
    
    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    print('Training is starting!...')
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir,epoch,i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test,preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)  # NOTE: 'img' is undefined in this branch (upstream bug)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
    print('Converting model to ml5js')
    dump_checkpoints(kwargs['save_path'], options.model_dir)
    print('Done! Checkpoint saved. Visit https://ml5js.org/docs/StyleTransfer for more information')
Example #26
def main():
    print("Enter main")
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    print("Get image target")
    style_target = get_img(options.style)
    if not options.slow:
        with log_time_usage("get images targets"):
            content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "debug": options.debug,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "tensorboard_dir": options.tensorboard_dir
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    print("Start training")
    with log_time_usage("Training completed in"):
        for preds, losses, i, epoch, time_info in optimize(*args, **kwargs):
            style_loss, content_loss, tv_loss, loss = losses

            print(
                'Epoch %d, Iteration: %d, Loss: %s, AVG batch time: %.2f, total_time: %.2f, ETA (in h): %.2f'
                % (epoch, i, loss, *time_info))
            to_print = (style_loss, content_loss, tv_loss)
            print('style: %s, content:%s, tv: %s' % to_print)
            if options.test:
                assert options.test_dir != False
                preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
                if not options.slow:
                    ckpt_dir = os.path.dirname(options.checkpoint_dir)
                    evaluate.ffwd_to_img(options.test, preds_path,
                                         options.checkpoint_dir)
                else:
                    # TODO: img is not defined
                    # save_img(preds_path, img)
                    pass
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #27
def main():
    check_version()
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow":options.slow,
        "epochs":options.epochs,
        "print_iterations":options.checkpoint_iterations,
        "batch_size":options.batch_size,
        "save_path":os.path.join(options.checkpoint_dir,'fns.ckpt'),
        "learning_rate":options.learning_rate,
        "device":options.device,
        "total_iterations":options.total_iterations,
        "base_model_path":options.base_model_path,
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets,
        style_target,
        options.content_weight,
        options.style_weight,
        options.tv_weight,
        options.vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        sys.stdout.flush()
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir,epoch,i)
            if not options.slow:
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test,preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)  # NOTE: 'img' is undefined in this branch (upstream bug)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint-dir %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #28
def main(style,
         test=False,
         test_dir='test',
         train_path=TRAIN_PATH,
         slow=False,
         epochs=NUM_EPOCHS,
         checkpoint_iterations=CHECKPOINT_ITERATIONS,
         batch_size=BATCH_SIZE,
         checkpoint_dir=CHECKPOINT_DIR,
         learning_rate=LEARNING_RATE,
         content_weight=CONTENT_WEIGHT,
         style_weight=STYLE_WEIGHT,
         tv_weight=TV_WEIGHT,
         vgg_path=VGG_PATH):
    #parser = build_parser()
    #options = parser.parse_args()
    #check_opts(options)

    style_target = get_img(style)
    if not slow:
        content_targets = _get_files(train_path)
    elif test:
        content_targets = [test]

    kwargs = {
        "slow": slow,
        "epochs": epochs,
        "print_iterations": checkpoint_iterations,
        "batch_size": batch_size,
        "save_path": checkpoint_dir,
        "learning_rate": learning_rate
    }

    if slow:
        if epochs < 10:
            kwargs['epochs'] = 1000
        if learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, content_weight, style_weight, tv_weight,
        vgg_path
    ]

    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses

        print('Epoch %d, Iteration: %d, Loss: %s' % (epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('style: %s, content:%s, tv: %s' % to_print)
        if test:
            assert test_dir != False
            preds_path = '%s/%s_%s.png' % (test_dir, epoch, i)
            if not slow:
                ckpt_dir = os.path.dirname(checkpoint_dir)
                evaluate.ffwd_to_img(test, preds_path, checkpoint_dir)
            else:
                save_img(preds_path, img)  # NOTE: 'img' is undefined in this branch (upstream bug)
Example #29
    def model(self):
        # X_reader = Reader('data/tfrecords/man2woman/man.tfrecords', name='X',
        #     image_size=self.image_size, batch_size=self.batch_size)
        # Y_reader = Reader('data/tfrecords/man2woman/woman.tfrecords', name='Y',
        #     image_size=self.image_size, batch_size=self.batch_size)

        # x = X_reader.feed()
        # y = Y_reader.feed()
        x = utils.get_img(self.file_x, self.image_size, self.image_size, self.batch_size)
        y = utils.get_img(self.file_y, self.image_size, self.image_size, self.batch_size)

        fake_y = self.G(x)
        fake_x = self.F(y)

        cycle_loss = self.cycle_consistency_loss(self.G, self.F, x, y)
        G_gan_loss, F_gan_loss, D_loss_x, D_loss_y = self.gan_loss(self.D_Y, self.D_X, 
                            self.x, self.y, x, y, fake_y, fake_x, self.use_mse)
        # G_gan_loss = tf.reduce_mean(tf.squared_difference(self.D_Y(fake_y), self.label))
        # F_gan_loss = tf.reduce_mean(tf.squared_difference(self.D_X(fake_x), self.label))
        # D_loss_x = self.discriminator_loss(self.D_X, x, self.x)
        # D_loss_y = self.discriminator_loss(self.D_Y, y, self.y)

        G_loss = G_gan_loss + cycle_loss
        F_loss = F_gan_loss + cycle_loss

        # summary
        tf.summary.histogram('D_Y/true', self.D_Y(self.y))
        tf.summary.histogram('D_Y/fake', self.D_Y(self.G(self.x)))
        tf.summary.histogram('D_X/true', self.D_X(self.x))
        tf.summary.histogram('D_X/fake', self.D_X(self.F(self.y)))

        tf.summary.scalar('loss/G', G_loss)
        tf.summary.scalar('loss/D_Y', D_loss_y)
        tf.summary.scalar('loss/F', F_loss)
        tf.summary.scalar('loss/D_X', D_loss_x)
        tf.summary.scalar('loss/cycle', cycle_loss)

        tf.summary.image('X/generated', utils.batch_convert2int(self.G(self.x)))
        tf.summary.image('X/reconstruction', utils.batch_convert2int(self.F(self.G(self.x))))
        tf.summary.image('Y/generated', utils.batch_convert2int(self.F(self.y)))
        tf.summary.image('Y/reconstruction', utils.batch_convert2int(self.G(self.F(self.y))))

        return G_loss, F_loss, D_loss_x, D_loss_y, fake_y, fake_x
Example #30
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)
    content_img = get_img(options.content, (256, 256, 3)).astype(np.float32)
    content_img = np.reshape(content_img, (1, ) + content_img.shape)
    prediction = ffwd(content_img, options.style)
    save_img(options.output_path, prediction)
    print('Image saved to {}'.format(options.output_path))
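The np.reshape adds the leading batch axis the network expects. The same idiom in isolation:

# Adding a batch axis: HxWxC -> 1xHxWxC (img[np.newaxis] would work too).
import numpy as np

img = np.zeros((256, 256, 3), dtype=np.float32)
batch = np.reshape(img, (1,) + img.shape)
assert batch.shape == (1, 256, 256, 3)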
Example #31
 def __getitem__(self, idx):
     img_id = self.ids[idx]
     image = get_img(img_id, self.img_db)
     augmented = self.transforms(image=image)
     image = augmented['image']
     if self.preprocessing:
         pre = self.preprocessing(image=image)
         image = pre['image']
     return image
Example #32
def main():
    parser = build_parser()
    options = parser.parse_args()
    check_opts(options)

    style_target = get_img(options.style)
    if not options.slow:
        content_targets = _get_files(options.train_path)
    elif options.test:
        content_targets = [options.test]

    kwargs = {
        "slow": options.slow,
        "epochs": options.epochs,
        "print_iterations": options.checkpoint_iterations,
        "batch_size": options.batch_size,
        "save_path": os.path.join(options.checkpoint_dir, 'fns.ckpt'),
        "learning_rate": options.learning_rate,
        "device_and_number": options.device_and_number
    }

    if options.slow:
        if options.epochs < 10:
            kwargs['epochs'] = 1000
        if options.learning_rate < 1:
            kwargs['learning_rate'] = 1e1

    args = [
        content_targets, style_target, options.content_weight,
        options.style_weight, options.tv_weight, options.vgg_path
    ]

    import time
    from datetime import datetime
    start_time = time.time()
    for preds, losses, i, epoch in optimize(*args, **kwargs):
        style_loss, content_loss, tv_loss, loss = losses
        delta_time, start_time = time.time() - start_time, time.time()
        print(
            'Current Time = {}; Time Elapsed = {}; Epoch = {}; Iteration = {}; Loss = {}'
            .format(datetime.now().strftime("%Y %B %d, %H:%M:%S"), delta_time,
                    epoch, i, loss))
        to_print = (style_loss, content_loss, tv_loss)
        print('Loss values: style = %s; content = %s; tv = %s' % to_print)
        sys.stdout.flush()
        if options.test:
            assert options.test_dir != False
            preds_path = '%s/%s_%s.png' % (options.test_dir, epoch, i)
            if not options.slow:  # on the GPU this can use RAM it doesn't have, so it's slow here
                ckpt_dir = os.path.dirname(options.checkpoint_dir)
                evaluate.ffwd_to_img(options.test, preds_path,
                                     options.checkpoint_dir)
            else:
                save_img(preds_path, img)  # NOTE: 'img' is undefined in this branch (upstream bug)
    ckpt_dir = options.checkpoint_dir
    cmd_text = 'python evaluate.py --checkpoint %s ...' % ckpt_dir
    print("Training complete. For evaluation:\n    `%s`" % cmd_text)
Example #33
    def set_style(self, idx=None, random=False, window='Style Controls'):
        if idx is not None:
            self.idx = idx
        if random:
            self.idx = np.random.randint(len(self.style_imgs))

        style_file = self.style_imgs[self.idx]
        print('Loading style image',style_file)
        if self.crop_size > 0:
            self.style_rgb = get_img_crop(style_file, resize=self.img_size, crop=self.crop_size)
        else:
            self.style_rgb = resize_to(get_img(style_file), self.img_size)
        self.show_style(window, self.style_rgb)
Example #34
def ffwd_different_dimensions(in_path, out_path, checkpoint_dir, 
            device_t=DEVICE, batch_size=4):
    in_path_of_shape = defaultdict(list)
    out_path_of_shape = defaultdict(list)
    for i in range(len(in_path)):
        in_image = in_path[i]
        out_image = out_path[i]
        shape = "%dx%dx%d" % get_img(in_image).shape
        in_path_of_shape[shape].append(in_image)
        out_path_of_shape[shape].append(out_image)
    for shape in in_path_of_shape:
        print('Processing images of shape %s' % shape)
        ffwd(in_path_of_shape[shape], out_path_of_shape[shape], 
            checkpoint_dir, device_t, batch_size)
Example #35
            except AttributeError:
                content = e.summary_detail.value
            atom_feed.items.append(PyRSS2Gen.RSSItem(
                title = e.title,
                link  = e.link,
                author = unicode(person,"utf-8"),
                description = content,
                pubDate = e.updated
            ))

            if Site.CONTEXT.config.staff.has_option(person, config_avatar):
                _avatar = Site.CONTEXT.config.staff.get(person, config_avatar)
            else:
                _avatar = default_avatar

            img = utils.get_img(content)

            Site.CONTEXT.planet.blog.post.append(AttrDict(
                title = e.title,
                author = unicode(person,"utf-8"),
                author_url = blog.feed.link,
                author_avatar = _avatar,
                updated = e.updated_parsed,
                updated_str = e.updated,
                img = img,
                url = e.link,
                content = content,
                tags = map(lambda d:d.term,e.tags) if getattr(e,"tags",False) else [],
                comments = e.slash_comments if getattr(e,"slash_comments",False) else 0,
                text_content = strip_tags(content)))
Example #36
    def on_tree_selection_changed(self, selection):
        """On sélectionne une ligne : on affiche la documentation de
        l'élément et son image associée."""

        def get_doc(path, items):
            """Colle les docs des éléments récursivement, dans le cas d'un path de la forme '0:1:2:x:y' (string).
            Retourne la documentation de tous ces éléments.
            """

            # redundant with get_item and get_apps?
            if ':' in path:
                left = int( path[0:path.index(':')] )
                path = path[ path.index(':') + 1 :]
                doc =  get_doc(path, items[left]['items'])
            else:

                path = int(path)
                item_cour = items[path]

                if item_cour.has_key('cat'):

                    if item_cour.has_key('doc'):
                        doc = item_cour['doc']
                    else:
                        # todo: summarize the whole category?
                        # (reuse get_apps code)
                        doc = utils.construct_doc(item_cour['items'], item_cour['cat'])

                    return doc

                if item_cour.has_key('title') and item_cour['title'] not in self.labels.keys():
                    doc = utils.construct_doc(item_cour)
                    self.labels[item_cour['title']] = doc

                else:
                    # the doc has already been computed; it is stored in self.labels
                    doc = self.labels[item_cour['title']]

            return doc


        model, treeiter = selection.get_selected()
        if treeiter != None:

            path = self.treeModel.get_path(treeiter).to_string()

            doc =  get_doc(path, self.ITEMS)
            # self.frame2.set_label(model[treeiter][0])
            # self.label.set_text(doc)
            self.label.set_markup(doc)

            # Display the image, if there is one:

            it_cour = self.get_item(path)
            if it_cour.has_key('im'):
                # We don't know the image yet:
                if not it_cour['title'] in self.images:

                    image = utils.get_img(it_cour['im'].strip() )

                    if not image:
                        print 'No image to display for %s.' % it_cour['title']
                        # It did not work, so we don't want to retry each time
                        # (it slows down the UI)
                        it_cour.pop('im')

                        self.im.set_from_resource(None)

                        return

                    self.images[it_cour['title']] = image.strip()
                    self.im.set_from_file(image)

                else:
                    # we already know the associated image
                    self.im.set_from_file(self.images[it_cour['title']])


            else:
                # there is no image; set_from_file(None) would show an icon,
                # so the following line displays nothing instead.
                self.im.set_from_resource(None)

            return
Example #37
def main():
    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints, 
                                relu_targets=args.relu_targets,
                                vgg_path=args.vgg_path, 
                                device=args.device,
                                ss_patch_size=args.ss_patch_size, 
                                ss_stride=args.ss_stride)

    # Create needed dirs
    in_dir = os.path.join(args.tmp_dir, 'input')
    out_dir = os.path.join(args.tmp_dir, 'stylized')
    if not os.path.exists(in_dir):
        os.makedirs(in_dir)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    if os.path.isdir(args.in_path):
        in_path = get_files(args.in_path)
    else: # Single image file
        in_path = [args.in_path]

    if os.path.isdir(args.style_path):
        style_files = get_files(args.style_path)
    else: # Single image file
        style_files = [args.style_path]

    print(style_files)
    import time
    # time.sleep(999)

    in_args = [
        'ffmpeg',
        '-i', args.in_path,
        '%s/frame_%%d.png' % in_dir
    ]

    subprocess.call(" ".join(in_args), shell=True)
    base_names = os.listdir(in_dir)
    in_files = [os.path.join(in_dir, x) for x in base_names]
    out_files = [os.path.join(out_dir, x) for x in base_names]

    s = time.time()
    for content_fullpath in in_path:
        content_prefix, content_ext = os.path.splitext(content_fullpath)
        content_prefix = os.path.basename(content_prefix)

        try:
            for style_fullpath in style_files:
                style_img = get_img(style_fullpath)
                if args.style_size > 0:
                    style_img = resize_to(style_img, args.style_size)
                if args.crop_size > 0:
                    style_img = center_crop(style_img, args.crop_size)

                style_prefix, _ = os.path.splitext(style_fullpath)
                style_prefix = os.path.basename(style_prefix)

                # print("ARRAY:  ", style_img)
                out_v = os.path.join(args.out_path, '{}_{}{}'.format(content_prefix, style_prefix, content_ext))
                print("OUT:",out_v)
                if os.path.isfile(out_v):
                    print("SKIP" , out_v)
                    continue
                
                for in_f, out_f in zip(in_files, out_files):
                    print('{} -> {}'.format(in_f, out_f))
                    content_img = get_img(in_f)

                    if args.keep_colors:
                        style_rgb = preserve_colors_np(style_img, content_img)
                    else:
                        style_rgb = style_img

                    stylized = wct_model.predict(content_img, style_rgb, args.alpha, args.swap5, args.ss_alpha)

                    if args.passes > 1:
                        for _ in range(args.passes-1):
                            stylized = wct_model.predict(stylized, style_rgb, args.alpha)

                    # Stitch the style + stylized output together, but only if there's one style image
                    if args.concat:
                        # Resize style img to same height as frame
                        style_img_resized = scipy.misc.imresize(style_rgb, (stylized.shape[0], stylized.shape[0]))
                        stylized = np.hstack([style_img_resized, stylized])

                    save_img(out_f, stylized)

                fr = 30
                out_args = [
                    'ffmpeg',
                    '-i', '%s/frame_%%d.png' % out_dir,
                    '-f', 'mp4',
                    '-q:v', '0',
                    '-vcodec', 'mpeg4',
                    '-r', str(fr),
                    '"' + out_v + '"'
                ]
                print(out_args)

                subprocess.call(" ".join(out_args), shell=True)
                print('Video at: %s' % out_v)

                if args.keep_tmp is True or len(style_files) > 1:
                    continue
                else:
                    shutil.rmtree(args.tmp_dir)
                print('Processed in:',(time.time() - s))

            print('Processed in:',(time.time() - s))
 
        except Exception as e:
            print("EXCEPTION: ",e)
Example #38
def optimize(content_targets, style_target, content_weight, style_weight,
             tv_weight, vgg_path, epochs=2, print_iterations=1000,
             batch_size=4, save_path='saver/fns.ckpt', slow=False,
             learning_rate=1e-3, debug=False):
    if slow:
        batch_size = 1
    mod = len(content_targets) % batch_size
    if mod > 0:
        print("Train set has been trimmed slightly..")
        content_targets = content_targets[:-mod] 

    style_features = {}

    batch_shape = (batch_size,256,256,3)
    style_shape = (1,) + style_target.shape
    print(style_shape)

    # precompute style features
    with tf.Graph().as_default(), tf.device('/cpu:0'), tf.Session() as sess:
        style_image = tf.placeholder(tf.float32, shape=style_shape, name='style_image')
        style_image_pre = vgg.preprocess(style_image)
        net = vgg.net(vgg_path, style_image_pre)
        style_pre = np.array([style_target])
        for layer in STYLE_LAYERS:
            features = net[layer].eval(feed_dict={style_image:style_pre})
            features = np.reshape(features, (-1, features.shape[3]))
            gram = np.matmul(features.T, features) / features.size
            style_features[layer] = gram

    with tf.Graph().as_default(), tf.Session() as sess:
        X_content = tf.placeholder(tf.float32, shape=batch_shape, name="X_content")
        X_pre = vgg.preprocess(X_content)

        # precompute content features
        content_features = {}
        content_net = vgg.net(vgg_path, X_pre)
        content_features[CONTENT_LAYER] = content_net[CONTENT_LAYER]

        if slow:
            preds = tf.Variable(
                tf.random_normal(X_content.get_shape()) * 0.256
            )
            preds_pre = preds
        else:
            preds = transform.net(X_content/255.0)
            preds_pre = vgg.preprocess(preds)

        net = vgg.net(vgg_path, preds_pre)

        content_size = _tensor_size(content_features[CONTENT_LAYER])*batch_size
        assert _tensor_size(content_features[CONTENT_LAYER]) == _tensor_size(net[CONTENT_LAYER])
        content_loss = content_weight * (2 * tf.nn.l2_loss(
            net[CONTENT_LAYER] - content_features[CONTENT_LAYER]) / content_size
        )

        style_losses = []
        for style_layer in STYLE_LAYERS:
            layer = net[style_layer]
            bs, height, width, filters = map(lambda i:i.value,layer.get_shape())
            size = height * width * filters
            feats = tf.reshape(layer, (bs, height * width, filters))
            feats_T = tf.transpose(feats, perm=[0,2,1])
            grams = tf.matmul(feats_T, feats) / size
            style_gram = style_features[style_layer]
            style_losses.append(2 * tf.nn.l2_loss(grams - style_gram)/style_gram.size)

        style_loss = style_weight * functools.reduce(tf.add, style_losses) / batch_size

        # total variation denoising
        tv_y_size = _tensor_size(preds[:,1:,:,:])
        tv_x_size = _tensor_size(preds[:,:,1:,:])
        y_tv = tf.nn.l2_loss(preds[:,1:,:,:] - preds[:,:batch_shape[1]-1,:,:])
        x_tv = tf.nn.l2_loss(preds[:,:,1:,:] - preds[:,:,:batch_shape[2]-1,:])
        tv_loss = tv_weight*2*(x_tv/tv_x_size + y_tv/tv_y_size)/batch_size

        loss = content_loss + style_loss + tv_loss

        # overall loss
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(loss)
        sess.run(tf.global_variables_initializer())
        import random
        uid = random.randint(1, 100)
        print("UID: %s" % uid)
        for epoch in range(epochs):
            num_examples = len(content_targets)
            iterations = 0
            while iterations * batch_size < num_examples:
                start_time = time.time()
                curr = iterations * batch_size
                step = curr + batch_size
                X_batch = np.zeros(batch_shape, dtype=np.float32)
                for j, img_p in enumerate(content_targets[curr:step]):
                    X_batch[j] = get_img(img_p, (256, 256, 3)).astype(np.float32)

                iterations += 1
                assert X_batch.shape[0] == batch_size

                feed_dict = {
                    X_content: X_batch
                }

                train_step.run(feed_dict=feed_dict)
                end_time = time.time()
                delta_time = end_time - start_time
                if debug:
                    print("UID: %s, batch time: %s" % (uid, delta_time))
                is_print_iter = int(iterations) % print_iterations == 0
                if slow:
                    is_print_iter = epoch % print_iterations == 0
                is_last = epoch == epochs - 1 and iterations * batch_size >= num_examples
                should_print = is_print_iter or is_last
                if should_print:
                    to_get = [style_loss, content_loss, tv_loss, loss, preds]
                    test_feed_dict = {
                        X_content: X_batch
                    }

                    tup = sess.run(to_get, feed_dict=test_feed_dict)
                    _style_loss, _content_loss, _tv_loss, _loss, _preds = tup
                    losses = (_style_loss, _content_loss, _tv_loss, _loss)
                    if slow:
                        _preds = vgg.unprocess(_preds)
                    else:
                        saver = tf.train.Saver()
                        res = saver.save(sess, save_path)
                    yield (_preds, losses, iterations, epoch)
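The style loss compares Gram matrices of VGG features. The same Gram computation in plain NumPy, mirroring the reshape-and-matmul in the precompute loop above (feature values are random placeholders):

import numpy as np

# One feature map flattened to (H*W, C), as in the style precompute above.
features = np.random.rand(32 * 32, 64).astype(np.float32)
gram = features.T @ features / features.size   # (C, C) Gram matrix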
Example #39
def main():
    start = time.time()

    # Load the WCT model
    wct_model = WCT(checkpoints=args.checkpoints, 
                                relu_targets=args.relu_targets,
                                vgg_path=args.vgg_path, 
                                device=args.device,
                                ss_patch_size=args.ss_patch_size, 
                                ss_stride=args.ss_stride)

    # Get content & style full paths
    if os.path.isdir(args.content_path):
        content_files = get_files(args.content_path)
    else: # Single image file
        content_files = [args.content_path]
    if os.path.isdir(args.style_path):
        style_files = get_files(args.style_path)
        if args.random > 0:
            style_files = np.random.choice(style_files, args.random)
    else: # Single image file
        style_files = [args.style_path]

    os.makedirs(args.out_path, exist_ok=True)

    count = 0

    ### Apply each style to each content image
    for content_fullpath in content_files:
        content_prefix, content_ext = os.path.splitext(content_fullpath)
        content_prefix = os.path.basename(content_prefix)  # Extract filename prefix without ext

        content_img = get_img(content_fullpath)
        if args.content_size > 0:
            content_img = resize_to(content_img, args.content_size)
        
        for style_fullpath in style_files: 
            style_prefix, _ = os.path.splitext(style_fullpath)
            style_prefix = os.path.basename(style_prefix)  # Extract filename prefix without ext

            # style_img = get_img_crop(style_fullpath, resize=args.style_size, crop=args.crop_size)
            # style_img = resize_to(get_img(style_fullpath), content_img.shape[0])

            style_img = get_img(style_fullpath)

            if args.style_size > 0:
                style_img = resize_to(style_img, args.style_size)
            if args.crop_size > 0:
                style_img = center_crop(style_img, args.crop_size)

            if args.keep_colors:
                style_img = preserve_colors_np(style_img, content_img)

            # if args.noise:  # Generate textures from noise instead of images
            #     frame_resize = np.random.randint(0, 256, frame_resize.shape, np.uint8)
            #     frame_resize = gaussian_filter(frame_resize, sigma=0.5)

            # Run the frame through the style network
            stylized_rgb = wct_model.predict(content_img, style_img, args.alpha, args.swap5, args.ss_alpha, args.adain)

            if args.passes > 1:
                for _ in range(args.passes-1):
                    stylized_rgb = wct_model.predict(stylized_rgb, style_img, args.alpha, args.swap5, args.ss_alpha, args.adain)

            # Stitch the style + stylized output together, but only if there's one style image
            if args.concat:
                # Resize style img to same height as frame
                style_img_resized = scipy.misc.imresize(style_img, (stylized_rgb.shape[0], stylized_rgb.shape[0]))
                # margin = np.ones((style_img_resized.shape[0], 10, 3)) * 255
                stylized_rgb = np.hstack([style_img_resized, stylized_rgb])

            # Format for out filename: {out_path}/{content_prefix}_{style_prefix}.{content_ext}
            out_f = os.path.join(args.out_path, '{}_{}{}'.format(content_prefix, style_prefix, content_ext))
            # out_f = f'{content_prefix}_{style_prefix}.{content_ext}'
            
            save_img(out_f, stylized_rgb)

            count += 1
            print("{}: Wrote stylized output image to {}".format(count, out_f))

    print("Finished stylizing {} outputs in {}s".format(count, time.time() - start))