Example No. 1
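These listings are excerpts and omit their imports. A minimal sketch of what each example presumably needs; the `nets` and `preprocessing` package paths are assumptions based on the slim-style project layout the comments reference:

import os
import time

import tensorflow as tf

from nets import nets_factory                    # assumed package path
from preprocessing import preprocessing_factory  # assumed package path

import losses
import model
import reader
import utils
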
def main(FLAGS):
    style_features_t = losses.get_style_features(FLAGS)

    # Make sure the training path exists.
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)
    if not(os.path.exists(training_path)):
        os.makedirs(training_path)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            """Build Network"""
            network_fn = nets_factory.get_network_fn(
                FLAGS.loss_model,
                num_classes=1,
                is_training=False)

            image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)
            processed_images = reader.image(FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size,
                                            'train2014/', image_preprocessing_fn, epochs=FLAGS.epoch)
            generated = model.net(processed_images, training=True)
            processed_generated = [image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
                                   for image in tf.unstack(generated, axis=0, num=FLAGS.batch_size)
                                   ]
            processed_generated = tf.stack(processed_generated)
            _, endpoints_dict = network_fn(tf.concat([processed_generated, processed_images], 0), spatial_squeeze=False)

            # Log the structure of loss network
            tf.logging.info('Loss network layers(You can define them in "content_layers" and "style_layers"):')
            for key in endpoints_dict:
                tf.logging.info(key)

            """Build Losses"""
            content_loss = losses.content_loss(endpoints_dict, FLAGS.content_layers)
            style_loss, style_loss_summary = losses.style_loss(endpoints_dict, style_features_t, FLAGS.style_layers)
            tv_loss = losses.total_variation_loss(generated)  # use the unprocessed image

            loss = FLAGS.style_weight * style_loss + FLAGS.content_weight * content_loss + FLAGS.tv_weight * tv_loss

            # Add Summary for visualization in tensorboard.
            """Add Summary"""
            tf.summary.scalar('losses/content_loss', content_loss)
            tf.summary.scalar('losses/style_loss', style_loss)
            tf.summary.scalar('losses/regularizer_loss', tv_loss)

            tf.summary.scalar('weighted_losses/weighted_content_loss', content_loss * FLAGS.content_weight)
            tf.summary.scalar('weighted_losses/weighted_style_loss', style_loss * FLAGS.style_weight)
            tf.summary.scalar('weighted_losses/weighted_regularizer_loss', tv_loss * FLAGS.tv_weight)
            tf.summary.scalar('total_loss', loss)

            for layer in FLAGS.style_layers:
                tf.summary.scalar('style_losses/' + layer, style_loss_summary[layer])
            tf.summary.image('generated', generated)
            # tf.image_summary('processed_generated', processed_generated)  # May be better?
            tf.summary.image('origin', tf.stack([
                image_unprocessing_fn(image) for image in tf.unstack(processed_images, axis=0, num=FLAGS.batch_size)
            ]))
            summary = tf.summary.merge_all()
            writer = tf.summary.FileWriter(training_path)

            """Prepare to Train"""
            global_step = tf.Variable(0, name="global_step", trainable=False)

            variable_to_train = []
            for variable in tf.trainable_variables():
                if not(variable.name.startswith(FLAGS.loss_model)):
                    variable_to_train.append(variable)
            train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step, var_list=variable_to_train)

            variables_to_restore = []
            for v in tf.global_variables():
                if not(v.name.startswith(FLAGS.loss_model)):
                    variables_to_restore.append(v)
            saver = tf.train.Saver(variables_to_restore, write_version=tf.train.SaverDef.V1)

            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])

            # Restore variables for loss network.
            init_func = utils._get_init_fn(FLAGS)
            init_func(sess)

            # Restore variables for training model if the checkpoint file exists.
            last_file = tf.train.latest_checkpoint(training_path)
            if last_file:
                tf.logging.info('Restoring model from {}'.format(last_file))
                saver.restore(sess, last_file)

            """Start Training"""
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            start_time = time.time()
            try:
                while not coord.should_stop():
                    _, loss_t, step = sess.run([train_op, loss, global_step])
                    elapsed_time = time.time() - start_time
                    start_time = time.time()
                    """logging"""
                    # print(step)
                    if step % 10 == 0:
                        tf.logging.info('step: %d,  total Loss %f, secs/step: %f' % (step, loss_t, elapsed_time))
                    """summary"""
                    if step % 25 == 0:
                        tf.logging.info('adding summary...')
                        summary_str = sess.run(summary)
                        writer.add_summary(summary_str, step)
                        writer.flush()
                    """checkpoint"""
                    if step % 1000 == 0:
                        saver.save(sess, os.path.join(training_path, 'fast-style-model.ckpt'), global_step=step)
            except tf.errors.OutOfRangeError:
                saver.save(sess, os.path.join(training_path, 'fast-style-model.ckpt-done'))
                tf.logging.info('Done training -- epoch limit reached')
            finally:
                coord.request_stop()
            coord.join(threads)
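
None of the examples show how FLAGS is built or how main is invoked. A hypothetical driver for the listing above; every attribute name and value is illustrative, reconstructed from what main() reads rather than taken from the original project (losses.get_style_features and utils._get_init_fn read further fields, such as the style image and the loss-network checkpoint, which are omitted here):

if __name__ == '__main__':
    from argparse import Namespace

    tf.logging.set_verbosity(tf.logging.INFO)
    # Hypothetical configuration: every attribute below is read somewhere in main().
    FLAGS = Namespace(
        loss_model='vgg_16',                      # slim network name for the loss network
        model_path='models',                      # checkpoints land in models/<naming>/
        naming='wave',
        batch_size=4,
        image_size=256,
        epoch=2,
        content_layers=['vgg_16/conv3/conv3_3'],  # endpoint names of the loss network
        style_layers=['vgg_16/conv1/conv1_2',
                      'vgg_16/conv2/conv2_2'],
        content_weight=1.0,
        style_weight=100.0,
        tv_weight=0.0,
    )
    main(FLAGS)
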
Example No. 2
def main(FLAGS):
    # Compute the style target's features.
    style_features_t = losses.get_style_features(FLAGS)

    # Make sure the training path exists.
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)
    if not (os.path.exists(training_path)):
        os.makedirs(training_path)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            """Build Network"""
            # Build the VGG loss network; the name in FLAGS.loss_model maps to an entry in networks_map in /nets/nets_factory.py.
            network_fn = nets_factory.get_network_fn(FLAGS.loss_model,
                                                     num_classes=1,
                                                     is_training=False)
            # Each network gets its own preprocessing.
            image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model, is_training=False)
            # Read one batch of data and preprocess it.
            # You do not have to use COCO here; since COCO is quite large,
            # any folder containing plenty of images works just as well.
            processed_images = reader.image(FLAGS.batch_size,
                                            FLAGS.image_height,
                                            FLAGS.image_width,
                                            'F:/CASIA/train_frame/real/',
                                            image_preprocessing_fn,
                                            epochs=FLAGS.epoch)
            # Run the transform network to generate images, i.e. y^ in the paper.
            generated = model.net(processed_images, training=True)
            # The generated images will be fed into VGG to compute the two losses, so preprocess them first.
            processed_generated = [
                image_preprocessing_fn(image, FLAGS.image_height,
                                       FLAGS.image_width) for image in
                tf.unstack(generated, axis=0, num=FLAGS.batch_size)
            ]
            # The comprehension above yields a list, so stack it back into a tensor.
            processed_generated = tf.stack(processed_generated)
            # Concatenate along the batch dimension: two [batch_size, h, w, c] tensors become one [2*batch_size, h, w, c] tensor,
            # so a single forward pass computes the features of both y^ and y_c.
            _, endpoints_dict = network_fn(tf.concat(
                [processed_generated, processed_images], 0),
                                           spatial_squeeze=False)

            # Log the structure of loss network
            tf.logging.info(
                'Loss network layers(You can define them in "content_layers" and "style_layers"):'
            )
            for key in endpoints_dict:
                tf.logging.info(key)
            """Build Losses"""
            # Compute the three losses.
            content_loss = losses.content_loss(endpoints_dict,
                                               FLAGS.content_layers)
            style_loss, style_loss_summary = losses.style_loss(
                endpoints_dict, style_features_t, FLAGS.style_layers)
            tv_loss = losses.total_variation_loss(
                generated)  # use the unprocessed image

            loss = FLAGS.style_weight * style_loss + FLAGS.content_weight * content_loss + FLAGS.tv_weight * tv_loss

            # Add Summary for visualization in tensorboard.
            """Add Summary"""
            # For TensorBoard only; safe to ignore.
            tf.summary.scalar('losses/content_loss', content_loss)
            tf.summary.scalar('losses/style_loss', style_loss)
            tf.summary.scalar('losses/regularizer_loss', tv_loss)

            tf.summary.scalar('weighted_losses/weighted_content_loss',
                              content_loss * FLAGS.content_weight)
            tf.summary.scalar('weighted_losses/weighted_style_loss',
                              style_loss * FLAGS.style_weight)
            tf.summary.scalar('weighted_losses/weighted_regularizer_loss',
                              tv_loss * FLAGS.tv_weight)
            tf.summary.scalar('total_loss', loss)

            for layer in FLAGS.style_layers:
                tf.summary.scalar('style_losses/' + layer,
                                  style_loss_summary[layer])
            tf.summary.image('generated', generated)
            # tf.image_summary('processed_generated', processed_generated)  # May be better?
            tf.summary.image(
                'origin',
                tf.stack([
                    image_unprocessing_fn(image) for image in tf.unstack(
                        processed_images, axis=0, num=FLAGS.batch_size)
                ]))
            summary = tf.summary.merge_all()
            writer = tf.summary.FileWriter(training_path)
            """Prepare to Train"""
            # Step counter.
            global_step = tf.Variable(0, name="global_step", trainable=False)

            variable_to_train = []
            for variable in tf.trainable_variables():
                # Add the trainable variables outside the VGG loss network to variable_to_train.
                if not (variable.name.startswith(FLAGS.loss_model)):
                    variable_to_train.append(variable)
            # Note the var_list argument.
            train_op = tf.train.AdamOptimizer(1e-3).minimize(
                loss, global_step=global_step, var_list=variable_to_train)

            variables_to_restore = []
            for v in tf.global_variables():
                # Add the saveable variables outside the VGG loss network to variables_to_restore.
                if not (v.name.startswith(FLAGS.loss_model)):
                    variables_to_restore.append(v)

            # Note variables_to_restore.
            saver = tf.train.Saver(variables_to_restore,
                                   write_version=tf.train.SaverDef.V1)

            sess.run([
                tf.global_variables_initializer(),
                tf.local_variables_initializer()
            ])

            # Restore variables for loss network.
            # A slim helper that loads the loss-network weights configured in FLAGS into this session.
            init_func = utils._get_init_fn(FLAGS)
            init_func(sess)

            # Restore variables for training model if the checkpoint file exists.
            last_file = tf.train.latest_checkpoint(training_path)
            if last_file:
                tf.logging.info('Restoring model from {}'.format(last_file))
                saver.restore(sess, last_file)
            """Start Training"""
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            start_time = time.time()
            try:
                while not coord.should_stop():
                    _, loss_t, step = sess.run([train_op, loss, global_step])
                    elapsed_time = time.time() - start_time
                    start_time = time.time()
                    """logging"""
                    print(step)
                    if step % 10 == 0:
                        tf.logging.info(
                            'step: %d,  total Loss %f, secs/step: %f' %
                            (step, loss_t, elapsed_time))
                    """summary"""
                    if step % 25 == 0:
                        tf.logging.info('adding summary...')
                        summary_str = sess.run(summary)
                        writer.add_summary(summary_str, step)
                        writer.flush()
                    """checkpoint"""
                    if step % 1000 == 0:
                        saver.save(sess,
                                   os.path.join(training_path,
                                                'fast-style-model.ckpt'),
                                   global_step=step)
            except tf.errors.OutOfRangeError:
                saver.save(
                    sess,
                    os.path.join(training_path, 'fast-style-model.ckpt-done'))
                tf.logging.info('Done training -- epoch limit reached')
            finally:
                coord.request_stop()
            coord.join(threads)
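
Both of the helpers these scripts lean on, losses.get_style_features and losses.style_loss, compare Gram matrices of the loss-network activations. A sketch of the Gram computation such a style loss is typically built on (an assumed implementation; gram() itself is not shown in these excerpts):

def gram(layer):
    """Gram matrix of a [batch, height, width, channels] activation tensor."""
    shape = tf.shape(layer)
    num_images, height, width, num_filters = shape[0], shape[1], shape[2], shape[3]
    # Flatten the spatial dimensions, then take channel-by-channel inner products,
    # normalized by the total number of activations.
    filters = tf.reshape(layer, tf.stack([num_images, -1, num_filters]))
    grams = tf.matmul(filters, filters, transpose_a=True) / tf.to_float(width * height * num_filters)
    return grams
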
Example No. 3
def main(argv=None):
    content_layers = FLAGS.content_layers.split(',')
    style_layers = FLAGS.style_layers.split(',')
    style_layers_weights = [
        float(i) for i in FLAGS.style_layers_weights.split(",")
    ]
    #num_steps_decay = 82786 / FLAGS.batch_size
    num_steps_decay = 10000

    style_features_t = losses.get_style_features(FLAGS)
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)
    if not (os.path.exists(training_path)):
        os.makedirs(training_path)

    with tf.Session() as sess:
        """Build Network"""
        network_fn = nets_factory.get_network_fn(FLAGS.loss_model,
                                                 num_classes=1,
                                                 is_training=False)
        image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
            FLAGS.loss_model, is_training=False)
        processed_images = reader.image(FLAGS.batch_size,
                                        FLAGS.image_size,
                                        FLAGS.image_size,
                                        'train2014/',
                                        image_preprocessing_fn,
                                        epochs=FLAGS.epoch)
        generated = model.net(processed_images, FLAGS.alpha)
        processed_generated = [
            image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
            for image in tf.unstack(generated, axis=0, num=FLAGS.batch_size)
        ]
        processed_generated = tf.stack(processed_generated)
        _, endpoints_dict = network_fn(tf.concat(
            [processed_generated, processed_images], 0),
                                       spatial_squeeze=False)
        """Build Losses"""
        content_loss = losses.content_loss(endpoints_dict, content_layers)
        style_loss, style_losses = losses.style_loss(endpoints_dict,
                                                     style_features_t,
                                                     style_layers,
                                                     style_layers_weights)
        tv_loss = losses.total_variation_loss(
            generated)  # use the unprocessed image
        content_loss = FLAGS.content_weight * content_loss
        style_loss = FLAGS.style_weight * style_loss
        tv_loss = FLAGS.tv_weight * tv_loss
        loss = style_loss + content_loss + tv_loss
        """Prepare to Train"""
        global_step = tf.Variable(0, name="global_step", trainable=False)
        variable_to_train = []
        for variable in tf.trainable_variables():
            if not (variable.name.startswith(FLAGS.loss_model)):
                variable_to_train.append(variable)

        lr = tf.train.exponential_decay(learning_rate=1e-1,
                                        global_step=global_step,
                                        decay_steps=num_steps_decay,
                                        decay_rate=1e-1,
                                        staircase=True)
        optimizer = tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-8)
        train_op = optimizer.minimize(loss,
                                      global_step=global_step,
                                      var_list=variable_to_train)
        #train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step, var_list=variable_to_train)
        variables_to_restore = []
        for v in tf.global_variables():
            if not (v.name.startswith(FLAGS.loss_model)):
                variables_to_restore.append(v)
        saver = tf.train.Saver(variables_to_restore)
        sess.run([
            tf.global_variables_initializer(),
            tf.local_variables_initializer()
        ])
        init_func = utils._get_init_fn(FLAGS)
        init_func(sess)
        last_file = tf.train.latest_checkpoint(training_path)
        if last_file:
            saver.restore(sess, last_file)
        """Start Training"""
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)
        try:
            while not coord.should_stop():
                _, c_loss, s_losses, t_loss, total_loss, step = sess.run([
                    train_op, content_loss, style_losses, tv_loss, loss,
                    global_step
                ])
                """logging"""
                if step % 10 == 0:
                    print(step, c_loss, s_losses, t_loss, total_loss)
                """checkpoint"""
                if step % 10000 == 0:
                    saver.save(sess,
                               os.path.join(training_path, 'fast-style-model'),
                               global_step=step)
                if step == FLAGS.max_iter:
                    saver.save(
                        sess,
                        os.path.join(training_path, 'fast-style-model-done'))
                    break
        except tf.errors.OutOfRangeError:
            saver.save(sess,
                       os.path.join(training_path, 'fast-style-model-done'))
            tf.logging.info('Done training -- epoch limit reached')
        finally:
            coord.request_stop()
        coord.join(threads)
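
With staircase=True, the schedule above works out to lr(step) = 0.1 * 0.1**(step // 10000): 0.1 for the first num_steps_decay steps, 0.01 for the next 10,000, and so on, i.e. the effective learning rate drops by a factor of ten every num_steps_decay steps.
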
Example No. 4
def main(FLAGS):
    style_features_t = losses.get_style_features(FLAGS)  # Gram matrices of the style target

    # Make sure the training path exists.
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)  # e.g. model/wave/; holds the trained model
    if not (os.path.exists(training_path)):
        os.makedirs(training_path)

    with tf.Graph().as_default():  # default graph
        with tf.Session() as sess:  # no as_default(), so the session closes when this with block exits
            """build loss network"""
            # The loss model is fixed; it is never trained.
            network_fn = nets_factory.get_network_fn(FLAGS.loss_model, num_classes=1, is_training=False)
            # Preprocessing/unpreprocessing functions for the content and generated images entering the loss model.
            image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(FLAGS.loss_model, is_training=False)
            # Preprocess one batch of training data.
            processed_images = reader.image(FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size, 'train2014/', image_preprocessing_fn, epochs=FLAGS.epoch)
            # The transform network (the network being trained) takes the preprocessed images.
            generated = model.net(processed_images, training=True)
            processed_generated = [image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size) for image in tf.unstack(generated, axis=0, num=FLAGS.batch_size)]
            processed_generated = tf.stack(processed_generated)
            # Feed both the generated and the content images through the loss model;
            # endpoints_dict holds every layer's output for both sets of images.
            _, endpoints_dict = network_fn(tf.concat([processed_generated, processed_images], 0), spatial_squeeze=False)
            # Log the structure of the loss network.
            tf.logging.info('Loss network layers (you can define them in "content_layers" and "style_layers"):')
            for key in endpoints_dict:
                tf.logging.info(key)  # print each layer name of the loss model

            """build losses"""
            content_loss = losses.content_loss(endpoints_dict, FLAGS.content_layers)
            style_loss, style_loss_summary = losses.style_loss(endpoints_dict, style_features_t, FLAGS.style_layers)
            tv_loss = losses.total_variation_loss(generated)

            loss = FLAGS.style_weight * style_loss + FLAGS.content_weight * content_loss + FLAGS.tv_weight * tv_loss

            # Add summaries for visualization in TensorBoard.
            """Add Summary"""
            tf.summary.scalar('losses/content_loss', content_loss)
            tf.summary.scalar('losses/style_loss', style_loss)
            tf.summary.scalar('losses/regularizer_loss', tv_loss)

            tf.summary.scalar('weighted_losses/weighted_content_loss', content_loss * FLAGS.content_weight)
            tf.summary.scalar('weighted_losses/weighted_style_loss', style_loss * FLAGS.style_weight)
            tf.summary.scalar('weighted_losses/weighted_regularizer_loss', tv_loss * FLAGS.tv_weight)
            tf.summary.scalar('total_loss', loss)

            for layer in FLAGS.style_layers:
                tf.summary.scalar('style_losses/' + layer, style_loss_summary[layer])
            tf.summary.image('generated', generated)
            tf.summary.image('origin', tf.stack([image_unprocessing_fn(image) for image in tf.unstack(processed_images, axis=0, num=FLAGS.batch_size)]))
            summary = tf.summary.merge_all()
            writer = tf.summary.FileWriter(training_path)

            """prepare to train"""
            global_step = tf.Variable(0, name='global_step', trainable=False)  # iteration counter

            variable_to_train = []  # variables that need training
            # Among all trainable variables (transform network + loss network),
            # keep only the ones outside the loss network.
            for variable in tf.trainable_variables():
                if not (variable.name.startswith(FLAGS.loss_model)):
                    variable_to_train.append(variable)
            train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step, var_list=variable_to_train)  # must be run in sess.run()

            # Among all global variables, collect the ones to save and restore. Note that
            # local variables are temporaries that live only inside threads.
            variables_to_restore = []
            for v in tf.global_variables():
                if not (v.name.startswith(FLAGS.loss_model)):
                    variables_to_restore.append(v)
            # variables_to_restore is the var_list this saver will save and restore.
            saver = tf.train.Saver(variables_to_restore, write_version=tf.train.SaverDef.V1)

            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])  # initialize global and local variables

            # Restore the parameters of the loss model.
            init_func = utils._get_init_fn(FLAGS)
            init_func(sess)

            # Restore the training model if a checkpoint file exists, i.e. load
            # previously trained parameters from training_path.
            last_file = tf.train.latest_checkpoint(training_path)
            if last_file:
                tf.logging.info('Restoring model from {}'.format(last_file))
                saver.restore(sess, last_file)  # if no checkpoint exists, restore is simply skipped; only the transform-network variables are restored

            """start training"""
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            start_time = time.time()
            try:
                while not coord.should_stop():  # stops once all queued data has been consumed
                    _, loss_t, step = sess.run([train_op, loss, global_step])
                    elapsed_time = time.time() - start_time
                    start_time = time.time()
                    """logging"""
                    # print(step)
                    if step % 10 == 0:
                        tf.logging.info('step: %d, total loss %f, secs/step: %f' % (step, loss_t, elapsed_time))
                    """summary"""
                    if step % 25 == 0:
                        tf.logging.info('adding summary...')
                        summary_str = sess.run(summary)
                        writer.add_summary(summary_str, step)
                        writer.flush()
                    """checkpoint"""
                    if step % 1000 == 0:
                        saver.save(sess, os.path.join(training_path, 'fast-style-model.ckpt'), global_step=step)  # saves the variables in variables_to_restore
            except tf.errors.OutOfRangeError:
                saver.save(sess, os.path.join(training_path, 'fast-style-model.ckpt-done'))
                tf.logging.info('Done training -- epoch limit reached')
            finally:
                coord.request_stop()  # ask all threads to stop
            coord.join(threads)  # wait for the threads to finish and fold them back into the main thread
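
The concat trick used throughout (stacking the generated batch on top of the content batch before a single network_fn call) only works because the loss functions split every endpoint back in two along the batch axis. A sketch of how losses.content_loss presumably does this (an assumed implementation, not shown in these excerpts):

def content_loss(endpoints_dict, content_layers):
    loss = 0
    for layer in content_layers:
        # First half of the batch holds the generated images, second half the originals.
        generated_images, content_images = tf.split(endpoints_dict[layer], 2, 0)
        size = tf.size(generated_images)
        # Mean squared difference between the two feature maps.
        loss += tf.nn.l2_loss(generated_images - content_images) * 2 / tf.to_float(size)
    return loss
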
Example No. 5
def main(FLAGS):
    style_features_t = losses.get_style_features(FLAGS)
    training_path = os.path.join(FLAGS.model_path, FLAGS.naming)
    if not (os.path.exists(training_path)):
        os.makedirs(training_path)

    with tf.Graph().as_default():
        with tf.Session() as sess:
            """创建Network"""
            network_fn = nets_factory.get_network_fn(
                FLAGS.loss_model,
                num_classes=1,
                is_training=False)

            image_preprocessing_fn, image_unprocessing_fn = preprocessing_factory.get_preprocessing(
                FLAGS.loss_model,
                is_training=False)

            """训练图片预处理"""
            processed_images = reader.batch_image(FLAGS.batch_size, FLAGS.image_size, FLAGS.image_size,
                                                  'train2014/', image_preprocessing_fn, epochs=FLAGS.epoch)
            generated = model.transform_network(processed_images, training=True)
            processed_generated = [image_preprocessing_fn(image, FLAGS.image_size, FLAGS.image_size)
                                   for image in tf.unstack(generated, axis=0, num=FLAGS.batch_size)
                                   ]
            processed_generated = tf.stack(processed_generated)
            _, endpoints_dict = network_fn(tf.concat([processed_generated, processed_images], 0), spatial_squeeze=False)
            tf.logging.info('Loss network layers(You can define them in "content_layers" and "style_layers"):')
            for key in endpoints_dict:
                tf.logging.info(key)

            """创建 Losses"""
            content_loss = losses.content_loss(endpoints_dict, FLAGS.content_layers)
            style_loss, style_loss_summary = losses.style_loss(endpoints_dict, style_features_t, FLAGS.style_layers)
            tv_loss = losses.total_variation_loss(generated)  # use the unprocessed image

            loss = FLAGS.style_weight * style_loss + FLAGS.content_weight * content_loss + FLAGS.tv_weight * tv_loss

            """准备训练"""
            global_step = tf.Variable(0, name="global_step", trainable=False)
            variable_to_train = []
            for variable in tf.trainable_variables():
                # Only train and save the variables of the transform network.
                if not (variable.name.startswith(FLAGS.loss_model)):
                    variable_to_train.append(variable)

            """优化"""
            train_op = tf.train.AdamOptimizer(1e-3).minimize(loss, global_step=global_step, var_list=variable_to_train)

            variables_to_restore = []
            for v in tf.global_variables():
                if not (v.name.startswith(FLAGS.loss_model)):
                    variables_to_restore.append(v)
            saver = tf.train.Saver(variables_to_restore, write_version=tf.train.SaverDef.V1)
            sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
            init_func = utils._get_init_fn(FLAGS)
            init_func(sess)
            last_file = tf.train.latest_checkpoint(training_path)
            if last_file:
                tf.logging.info('Restoring model from {}'.format(last_file))
                saver.restore(sess, last_file)

            """开始训练"""
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            start_time = time.time()
            try:
                while not coord.should_stop():
                    _, loss_t, step = sess.run([train_op, loss, global_step])
                    elapsed_time = time.time() - start_time
                    start_time = time.time()
                    if step % 10 == 0:
                        tf.logging.info(
                            'step: %d,  total Loss %f, secs/step: %f,%s' % (step, loss_t, elapsed_time, time.asctime()))
                    """checkpoint"""
                    if step % 50 == 0:
                        tf.logging.info('saving check point...')
                        saver.save(sess, os.path.join(training_path, FLAGS.naming + '.ckpt'), global_step=step)
            except tf.errors.OutOfRangeError:
                saver.save(sess, os.path.join(training_path, 'fast-style-model.ckpt-done'))
                tf.logging.info('Done training -- epoch limit reached')
            finally:
                coord.request_stop()
                tf.logging.info('coordinator stop')
            coord.join(threads)
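
Every example also regularizes with losses.total_variation_loss(generated) on the unprocessed output. A minimal sketch of a total-variation loss in the same TF1 style (an assumed form; the real implementation is not part of these excerpts):

def total_variation_loss(layer):
    shape = tf.shape(layer)
    height, width = shape[1], shape[2]
    # Differences between vertically adjacent pixels...
    y = tf.slice(layer, [0, 0, 0, 0], tf.stack([-1, height - 1, -1, -1])) \
        - tf.slice(layer, [0, 1, 0, 0], [-1, -1, -1, -1])
    # ...and between horizontally adjacent pixels.
    x = tf.slice(layer, [0, 0, 0, 0], tf.stack([-1, -1, width - 1, -1])) \
        - tf.slice(layer, [0, 0, 1, 0], [-1, -1, -1, -1])
    return tf.nn.l2_loss(x) / tf.to_float(tf.size(x)) + tf.nn.l2_loss(y) / tf.to_float(tf.size(y))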