Example #1
    def build(self, args):
        self.preBuild()

        self.ys = kld.plchf([None, None, 3], 'style')
        self.yc = kld.plchf([None, None, None, 3], 'content')

        with tf.variable_scope('', reuse=tf.AUTO_REUSE):

            # build the transform net twice in the same scope: a 'calc' pass
            # over the content batch, then an 'eval' pass whose output yh is
            # the stylized image evaluated against VGG below
            args.net.type = 'calc'
            args.net.build(self.yc)

            args.net.type = 'eval'
            self.yh = args.net.build(self.yc)

        self.ysi = tf.expand_dims(self.ys, 0)

        style_layers = args.vgg_net.feed_forward(self.ysi, 'style')
        content_layers = args.vgg_net.feed_forward(self.yc, 'content')
        self.Fs = args.vgg_net.feed_forward(self.yh, 'mixed')

        self.Ss = {}
        for id in self.style_layers:
            self.Ss[id] = style_layers[id]

        self.Cs = {}
        for id in self.content_layers:
            self.Cs[id] = content_layers[id]

        L_style, L_content = 0, 0
        for id in self.Fs:

            if id in self.style_layers:

                # style term: squared distance between Gram matrices
                F = kld.gram_matrix(self.Fs[id])
                S = kld.gram_matrix(self.Ss[id])

                b, d1, d2 = kld.get_shape(F)
                bd1d2 = kld.toFloat(b * d1 * d2)
                wgt = self.style_layers[id]

                L_style += wgt * 2 * tf.nn.l2_loss(F - S) / bd1d2

            if id in self.content_layers:

                # content term: squared distance between raw feature maps
                F = self.Fs[id]
                C = self.Cs[id]

                b, h, w, d = kld.get_shape(F)
                bhwd = kld.toFloat(b * h * w * d)
                wgt = self.content_layers[id]

                L_content += wgt * 2 * tf.nn.l2_loss(F - C) / bhwd

        # total-variation term: penalizes high-frequency noise in the output
        L_totvar = kld.total_variation_loss(self.yh)

        self.L_style = args.wgt_style * L_style
        self.L_content = args.wgt_content * L_content
        self.L_totvar = args.wgt_totvar * L_totvar
        self.L_full = self.L_style + self.L_content + self.L_totvar
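The kld helper module used throughout these examples is not shown. Below is a minimal, hypothetical sketch of what the helpers referenced above (plchf, get_shape, toFloat, gram_matrix, total_variation_loss) might look like under TensorFlow 1.x; the real kld module may differ in names, normalization, and return types.

# Hypothetical sketch of the kld helpers (TensorFlow 1.x); names follow the
# calls above, but the real implementations may differ.
import tensorflow as tf

def plchf(shape, name):
    # float32 placeholder; shape=None gives an unconstrained placeholder
    # (used above for the scalar learn rate)
    return tf.placeholder(tf.float32, shape=shape, name=name)

def get_shape(t):
    # dynamic shape, unpacked per dimension (works with None-sized placeholders)
    return tf.unstack(tf.shape(t))

def toFloat(x):
    return tf.cast(x, tf.float32)

def gram_matrix(feats):
    # feats: [batch, height, width, channels] feature maps;
    # returns [batch, channels, channels] Gram matrices for the style term
    # (normalization conventions vary between implementations)
    b, h, w, c = tf.unstack(tf.shape(feats))
    F = tf.reshape(feats, [b, h * w, c])
    return tf.matmul(F, F, transpose_a=True) / tf.cast(h * w, tf.float32)

def total_variation_loss(img):
    # mean squared difference between neighbouring pixels (smoothness prior)
    dh = img[:, 1:, :, :] - img[:, :-1, :, :]
    dw = img[:, :, 1:, :] - img[:, :, :-1, :]
    return tf.reduce_mean(tf.square(dh)) + tf.reduce_mean(tf.square(dw))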
Example #2
    def build(self, args):
        self.preBuild()

        self.x = kld.plchf([None, None, 3], 'input')
        self.xi = tf.expand_dims(self.x, 0)
        self.xs = kld.plchf([None, None, 3], 'small')
        self.xsi = tf.expand_dims(self.xs, 0)

        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            args.net.type = 'calc'
            args.net.build(self.xsi)
            args.net.type = 'eval'
            self.yh = args.net.build(self.xi)
            self.yh = tf.squeeze(self.yh)
            self.yh = tf.clip_by_value(self.yh, 0.0, 255.0)
Example #3
    def build(self, args):
        self.preBuild()

        self.x = kld.plchf([None, None, 3], 'input')
        self.xi = tf.expand_dims(self.x, 0)

        self.yh = args.net.build(self.xi)
        self.yh = tf.squeeze(self.yh)
        self.yh = tf.clip_by_value(self.yh, 0.0, 255.0)
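One possible way to run the graph from Example #3 at inference time is sketched below; the model instance, session handling, and the random test image are assumptions and not part of the original code.

import numpy as np
import tensorflow as tf

# sketch only: assumes `model` is an instance of the class above with
# build(args) already called (and its weights restored in practice)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    img = np.random.uniform(0.0, 255.0, (256, 256, 3)).astype(np.float32)
    out = sess.run(model.yh, feed_dict={model.x: img})
    # out: stylized image with the same spatial size, values clipped to [0, 255]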
Example #4
    def train(self, args):
        self.preTrain()

        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.L_full, trainable_variables)

        self.lr = kld.plchf(None, 'learn_rate')
        optimizer = tf.train.AdamOptimizer(self.lr)
        optim = optimizer.apply_gradients(zip(grads, trainable_variables))

        yc = np.zeros([args.batch_size] + args.image_content['shape'],
                      dtype=np.float32)

        epoch = self.load_model()
        for epoch in range(epoch, args.num_epochs):

            kld.shuffle_list(self.ysL)
            kld.shuffle_list(self.ycL)
            lr = self.calc_learn_rate(epoch)

            # for iter in range(args.num_iters):
            for iter in range(int(args.num_iters / 2)):

                curr, last = self.next_idxs(iter)
                for j, path in enumerate(self.ycL[curr:last]):
                    yc[j] = self.load_image(path, args.image_content)
                ys = self.ysL[iter % len(self.ysL)]

                self.sess.run([optim],
                              feed_dict={
                                  self.yc1: yc,
                                  self.yc2: yc,
                                  self.ys: ys,
                                  self.lr: lr
                              })

                if self.time_to_eval(iter):
                    L_full, L_style, L_content, L_totvar = self.sess.run(
                        [
                            self.L_full, self.L_style, self.L_content,
                            self.L_totvar
                        ],
                        feed_dict={
                            self.yc1: yc,
                            self.yc2: yc,
                            self.ys: ys
                        })
                    self.print_counters(epoch, iter)
                    print(
                        '|| L_full : %3.5e | L_style : %3.5e | L_content : %3.5e | L_totvar : %3.5e'
                        % (L_full, L_style, L_content, L_totvar),
                        end='')
                    self.values.append(
                        [epoch, iter, L_full, L_style, L_content, L_totvar])
                    self.print_time(epoch, iter)

            self.save_model()
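The loop above leans on a few helpers that are not shown (kld.shuffle_list, self.next_idxs, self.calc_learn_rate). The sketch below gives plausible minimal versions of the first two, inferred only from how they are called; they are assumptions, not the original implementations.

import random

def shuffle_list(lst):
    # in-place shuffle of a list of image paths, as kld.shuffle_list is used above
    random.shuffle(lst)

def next_idxs(self, iter):
    # method sketch: slice bounds of the current mini-batch within self.ycL,
    # assuming the object stores args.batch_size as self.batch_size
    curr = iter * self.batch_size
    return curr, curr + self.batch_size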
Example #5
    def build(self, args):
        self.preBuild()

        self.x = kld.plchf([None, None, None, 3], 'input')
        self.xs1 = kld.plchf([None, None, None, 3], 'small1')
        self.xs2 = kld.plchf([None, None, None, 3], 'small2')

        # original network in the root scope: a 'calc' pass on the small
        # input and an 'eval' pass on the full-size input
        with tf.variable_scope('', reuse=tf.AUTO_REUSE):
            args.net.type = 'calc'
            self.pars1 = args.net.build(self.xs1)
            args.net.type = 'eval'
            self.yh1 = args.net.build(self.x)
            self.yh1 = tf.squeeze(self.yh1)
            self.yh1 = tf.clip_by_value(self.yh1, 0.0, 255.0)
        vars_orig = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='')
        self.saver = tf.train.Saver()

        # second copy of the network in the 'updt' scope; only these
        # variables receive gradients below
        with tf.variable_scope('updt', reuse=tf.AUTO_REUSE):
            args.net.type = 'calc'
            self.pars2 = args.net.build(self.xs2)
            args.net.type = 'eval'
            self.yh2 = args.net.build(self.x)
            self.yh2 = tf.squeeze(self.yh2)
            self.yh2 = tf.clip_by_value(self.yh2, 0.0, 255.0)
        vars_updt = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                      scope='updt')

        # ops that copy the trained weights from the root scope into 'updt'
        self.op_updt = []
        for (u, o) in zip(vars_updt, vars_orig):
            self.op_updt.append(tf.assign(u, o))

        # train the 'updt' copy so its output matches the original network's
        self.loss = tf.reduce_mean(tf.square(self.yh2 - self.yh1))
        grads_updt = tf.gradients(self.loss, vars_updt)

        self.lr = kld.plchf(None, 'learn_rate')
        optimizer = tf.train.AdamOptimizer(self.lr)
        self.optim = optimizer.apply_gradients(zip(grads_updt, vars_updt))
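One plausible way to drive the ops built above is sketched here; the session, batches, step count, and learning rate are assumptions, not part of the original code.

# sketch: `model` is an instance of the class above, `sess` a tf.Session
# with the original weights already restored
sess.run(model.op_updt)                      # sync the 'updt' copy with the original
for step in range(1000):                     # step count is an assumption
    _, loss = sess.run(
        [model.optim, model.loss],
        feed_dict={model.x: batch_full,      # full-size [N, H, W, 3] batch
                   model.xs1: batch_small_a, # small inputs for the two 'calc' passes
                   model.xs2: batch_small_b,
                   model.lr: 1e-4})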
Example #6
    def train(self, args):
        self.preTrain()

        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.L_full, trainable_variables)

        self.lr = kld.plchf(None, 'learn_rate')
        optimizer = tf.train.AdamOptimizer(self.lr)
        optim = optimizer.apply_gradients(zip(grads, trainable_variables))

        yc = np.zeros([args.batch_size] + args.image_content['shape'],
                      dtype=np.float32)

        size, pad = 256, 32
        h, w, c = yc[0].shape
        n = int(np.ceil(max(h, w) / size))
        hs, ws = int(h / n), int(w / n)

        epoch = self.load_model()
        for epoch in range(epoch, args.num_epochs):

            kld.shuffle_list(self.ysL)
            kld.shuffle_list(self.ycL)
            lr = self.calc_learn_rate(epoch)

            # for iter in range(args.num_iters):
            for iter in range(int(args.num_iters / 2)):

                curr, last = self.next_idxs(iter)
                for j, path in enumerate(self.ycL[curr:last]):
                    yc[j] = self.load_image(path, args.image_content)
                ys = self.ysL[iter % len(self.ysL)]

                yc_small = []
                for y in yc:
                    yc_small.append(y)
                    #yc_small.append( scipy.misc.imresize( y , 1.0 / n , interp = 'nearest' ) )
                # 'calc' pass: precompute the net's parameters from the batch;
                # they are fed into the 'eval'-pass placeholders (pars_eval) below
                pars = self.sess.run(args.net.pars_calc,
                                     feed_dict={self.yc: yc_small})

                #import scipy
                #small = scipy.misc.imresize( input , 1.0 / n , interp = 'nearest' )
                #pars = self.sess.run( args.net.pars_calc , feed_dict = { self.x : small } )

                #pars_dict = {}
                #for i in range( len( pars ) ):
                #    pars_dict[args.net.pars_eval[i]] = pars[i]
                #output = self.sess.run( self.yh , feed_dict = { **{ self.x : input } , **pars_dict } )

                pars_dict = {}
                for i in range(len(pars)):
                    pars_dict[args.net.pars_eval[i]] = pars[i]

                self.sess.run([optim],
                              feed_dict={
                                  **{
                                      self.yc: yc,
                                      self.ys: ys,
                                      self.lr: lr
                                  },
                                  **pars_dict
                              })

                if self.time_to_eval(iter):
                    L_full, L_style, L_content, L_totvar = self.sess.run(
                        [
                            self.L_full, self.L_style, self.L_content,
                            self.L_totvar
                        ],
                        feed_dict={
                            **{
                                self.yc: yc,
                                self.ys: ys
                            },
                            **pars_dict
                        })
                    self.print_counters(epoch, iter)
                    print(
                        '|| L_full : %3.5e | L_style : %3.5e | L_content : %3.5e | L_totvar : %3.5e'
                        % (L_full, L_style, L_content, L_totvar),
                        end='')
                    self.values.append(
                        [epoch, iter, L_full, L_style, L_content, L_totvar])
                    self.print_time(epoch, iter)

            self.save_model()
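The commented-out lines inside the loop above hint at a two-pass inference path: run the 'calc' pass on a downscaled copy of the input, then feed the resulting parameters into the 'eval'-pass placeholders. A cleaned-up sketch is shown below; it assumes an eval-style graph with self.x and self.yh as in Example #2, and replaces the deprecated scipy.misc.imresize with a crude strided slice.

# sketch of the two-pass inference hinted at in the commented code above;
# `self`, `args`, `input`, and `n` are assumed as in the surrounding examples
small = input[::n, ::n, :]                   # rough 1/n nearest-neighbour downscale
pars = self.sess.run(args.net.pars_calc,
                     feed_dict={self.x: small})
pars_dict = {p: v for p, v in zip(args.net.pars_eval, pars)}
output = self.sess.run(self.yh,
                       feed_dict={**{self.x: input}, **pars_dict})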