Example #1
    def train(self, args):
        self.preTrain()

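        # Gradients of the full loss w.r.t. every trainable variable, applied
        # with Adam; the learning rate is fed each step through a placeholder
        # (kld.plchf presumably creates a scalar float32 placeholder).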
        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.L_full, trainable_variables)

        self.lr = kld.plchf(None, 'learn_rate')
        optimizer = tf.train.AdamOptimizer(self.lr)
        optim = optimizer.apply_gradients(zip(grads, trainable_variables))

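        # Reusable buffer holding one batch of content images.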
        yc = np.zeros([args.batch_size] + args.image_content['shape'],
                      dtype=np.float32)

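        # Resume from the epoch returned by load_model, if a checkpoint exists.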
        epoch = self.load_model()
        for epoch in range(epoch, args.num_epochs):

            kld.shuffle_list(self.ysL)
            kld.shuffle_list(self.ycL)
            lr = self.calc_learn_rate(epoch)

            #            for iter in range( args.num_iters ):
            for iter in range(int(args.num_iters / 2)):

                curr, last = self.next_idxs(iter)
                for j, path in enumerate(self.ycL[curr:last]):
                    yc[j] = self.load_image(path, args.image_content)
                ys = self.ysL[iter % len(self.ysL)]

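                # One optimization step; the same content batch is fed to both
                # content placeholders.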
                self.sess.run([optim],
                              feed_dict={
                                  self.yc1: yc,
                                  self.yc2: yc,
                                  self.ys: ys,
                                  self.lr: lr
                              })

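                # Periodically evaluate and log the individual loss terms on
                # the current batch.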
                if self.time_to_eval(iter):
                    L_full, L_style, L_content, L_totvar = self.sess.run(
                        [
                            self.L_full, self.L_style, self.L_content,
                            self.L_totvar
                        ],
                        feed_dict={
                            self.yc1: yc,
                            self.yc2: yc,
                            self.ys: ys
                        })
                    self.print_counters(epoch, iter)
                    print(
                        '|| L_full : %3.5e | L_style : %3.5e | L_content : %3.5e | L_totvar : %3.5e'
                        % (L_full, L_style, L_content, L_totvar),
                        end='')
                    self.values.append(
                        [epoch, iter, L_full, L_style, L_content, L_totvar])
                    self.print_time(epoch, iter)

            self.save_model()

    def test(self, args):
        self.preTest()

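        # Reference sizes used to derive the downscale factors for the two
        # low-resolution inputs (pad is unused in this method).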
        size1, size2, pad = 1024, 256, 32
        model_name = kld.basename(args.model_dir)
        suffix = '%s_%d.jpg' % (model_name, args.image_test['size'])

        yc = np.zeros([args.batch_size] + args.image_content['shape'],
                      dtype=np.float32)

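        # Downscale factors chosen so the largest dimension ends up at roughly
        # size1 or size2 pixels, computed at both content and test resolution.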
        yc0 = self.load_image(self.ycL[0], args.image_content)
        hc, wc, cc = yc0.shape
        n1c = int(np.ceil(max(hc, wc) / size1))
        n2c = int(np.ceil(max(hc, wc) / size2))

        yc0 = self.load_image(self.ycL[0], args.image_test)
        ht, wt, ct = yc0.shape
        n1t = int(np.ceil(max(ht, wt) / size1))
        n2t = int(np.ceil(max(ht, wt) / size2))

        self.load_model()
        self.sess.run(self.op_updt)
        for epoch in range(0, args.num_epochs):

            kld.shuffle_list(self.ycL)
            lr = self.calc_learn_rate(epoch)

            files = kld.get_dir_files(args.input_dir)
            for file in files:

                print('%d - %s' % (args.image_test['size'], file))

                file_name = kld.basename(file)[:-4]
                file_dir = '%s/%s' % (args.input_dir, file_name)
                kld.make_dir(file_dir)

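                # Run the network on the full image twice, once with each
                # low-resolution input, and save both outputs.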
                input = self.load_image(file, args.image_test)

                small1 = scipy.misc.imresize(input,
                                             1.0 / n1t,
                                             interp='nearest')
                output1 = self.sess.run(self.yh1,
                                        feed_dict={
                                            self.x: [input],
                                            self.xs1: [small1]
                                        })
                path = '%s/full4A_%02d_%s_%s' % (file_dir, epoch, file_name,
                                                 suffix)
                kld.save_image(output1, path)

                small2 = scipy.misc.imresize(input,
                                             1.0 / n2t,
                                             interp='nearest')
                output2 = self.sess.run(self.yh2,
                                        feed_dict={
                                            self.x: [input],
                                            self.xs2: [small2]
                                        })
                path = '%s/full4B_%02d_%s_%s' % (file_dir, epoch, file_name,
                                                 suffix)
                kld.save_image(output2, path)

                print(input.shape)
                print(small1.shape)
                print(small2.shape)
                print(n1c, n2c, n1t, n2t)

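            # Continue optimizing on content batches, feeding each image at
            # full resolution plus both downscaled versions.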
            for iter in range(args.num_iters):

                print(epoch, args.num_epochs, iter, args.num_iters)

                yc_small1, yc_small2 = [], []
                curr, last = self.next_idxs(iter)
                for j, path in enumerate(self.ycL[curr:last]):
                    yc[j] = self.load_image(path, args.image_content)
                    yc_small1.append(
                        scipy.misc.imresize(yc[j], 1.0 / n1c,
                                            interp='nearest'))
                    yc_small2.append(
                        scipy.misc.imresize(yc[j], 1.0 / n2c,
                                            interp='nearest'))

                self.sess.run(
                    [self.optim],
                    feed_dict={
                        self.x: yc,
                        self.xs1: yc_small1,
                        self.xs2: yc_small2,
                        self.lr: lr
                    })

Example #3
    def train(self, args):
        self.preTrain()

        trainable_variables = tf.trainable_variables()
        grads = tf.gradients(self.L_full, trainable_variables)

        self.lr = kld.plchf(None, 'learn_rate')
        optimizer = tf.train.AdamOptimizer(self.lr)
        optim = optimizer.apply_gradients(zip(grads, trainable_variables))

        yc = np.zeros([args.batch_size] + args.image_content['shape'],
                      dtype=np.float32)

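        # Downscale factor chosen so the largest content dimension is roughly
        # size pixels; hs and ws are the resulting small height and width.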
        size, pad = 256, 32
        h, w, c = yc[0].shape
        n = int(np.ceil(max(h, w) / size))
        hs, ws = int(h / n), int(w / n)

        epoch = self.load_model()
        for epoch in range(epoch, args.num_epochs):

            kld.shuffle_list(self.ysL)
            kld.shuffle_list(self.ycL)
            lr = self.calc_learn_rate(epoch)

            #            for iter in range( args.num_iters ):
            for iter in range(int(args.num_iters / 2)):

                curr, last = self.next_idxs(iter)
                for j, path in enumerate(self.ycL[curr:last]):
                    yc[j] = self.load_image(path, args.image_content)
                ys = self.ysL[iter % len(self.ysL)]

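                # Compute the network parameters from the content batch (the
                # downscaled variant is left commented out), then feed them
                # back in alongside the batch for the optimization step.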
                yc_small = []
                for y in yc:
                    yc_small.append(y)
                    #yc_small.append( scipy.misc.imresize( y , 1.0 / n , interp = 'nearest' ) )
                pars = self.sess.run(args.net.pars_calc,
                                     feed_dict={self.yc: yc_small})

                #import scipy
                #small = scipy.misc.imresize( input , 1.0 / n , interp = 'nearest' )
                #pars = self.sess.run( args.net.pars_calc , feed_dict = { self.x : small } )

                #pars_dict = {}
                #for i in range( len( pars ) ):
                #    pars_dict[args.net.pars_eval[i]] = pars[i]
                #output = self.sess.run( self.yh , feed_dict = { **{ self.x : input } , **pars_dict } )

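                # Pair each computed parameter with its corresponding
                # pars_eval tensor for feeding.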
                pars_dict = {}
                for i in range(len(pars)):
                    pars_dict[args.net.pars_eval[i]] = pars[i]

                self.sess.run([optim],
                              feed_dict={
                                  **{
                                      self.yc: yc,
                                      self.ys: ys,
                                      self.lr: lr
                                  },
                                  **pars_dict
                              })

                if self.time_to_eval(iter):
                    L_full, L_style, L_content, L_totvar = self.sess.run(
                        [
                            self.L_full, self.L_style, self.L_content,
                            self.L_totvar
                        ],
                        feed_dict={
                            **{
                                self.yc: yc,
                                self.ys: ys
                            },
                            **pars_dict
                        })
                    self.print_counters(epoch, iter)
                    print(
                        '|| L_full : %3.5e | L_style : %3.5e | L_content : %3.5e | L_totvar : %3.5e'
                        % (L_full, L_style, L_content, L_totvar),
                        end='')
                    self.values.append(
                        [epoch, iter, L_full, L_style, L_content, L_totvar])
                    self.print_time(epoch, iter)

            self.save_model()
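
Both examples depend on a small helper module imported as kld whose implementation is not shown. Purely as an assumption inferred from how the calls above are used (not the original code), the helpers might look roughly like this:

# Hypothetical sketch of the kld helpers referenced above, inferred only from
# their call sites; the real module may differ.
import os
import random

import numpy as np
import scipy.misc
import tensorflow as tf


def plchf(shape, name):
    # Float placeholder; shape=None accepts any shape (here a scalar learning rate).
    return tf.placeholder(tf.float32, shape=shape, name=name)


def shuffle_list(lst):
    # In-place shuffle, since the training loops rely on the side effect.
    random.shuffle(lst)


def basename(path):
    return os.path.basename(path)


def make_dir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def get_dir_files(path):
    return sorted(os.path.join(path, f) for f in os.listdir(path))


def save_image(batch, path):
    # Assumes the network output is a batch; saves the first image.
    scipy.misc.imsave(path, np.clip(batch[0], 0, 255).astype(np.uint8))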