Code Example #1
def main(args):
    tf.disable_eager_execution()

    if os.path.exists(args.output_path):
        shutil.rmtree(args.output_path)
    os.makedirs(args.output_path)

    model_dic, sess = load_model(args)

    dirs = os.listdir(args.input_path)
    for folder in dirs:
        path = os.path.join(args.input_path, folder)
        if os.path.isdir(path):
            output_path = os.path.join(args.output_path, folder)
            if not os.path.exists(output_path):
                os.makedirs(output_path)

            print('========== input %s ============' % (folder))
            positive_img_names = tf.gfile.Glob(os.path.join(
                path, '*_0001.jpg'))  # positive image
            print('Positive: %s' % (os.path.basename(positive_img_names[0])))
            # get the positive image
            img_positive = io.imread(positive_img_names[0]) / 255.
            img_positive = rescale(img_positive,
                                   112. / 600.,
                                   order=5,
                                   multichannel=True)
            img_positive = prep(img_positive)  #[1,3,112,112]
            fdict = {
                model_dic['image_input']: img_positive,
                model_dic['keep_prob']: 1.0,
                model_dic['is_train']: False
            }
            pos_embedding_np = sess.run(model_dic['embedding'],
                                        feed_dict=fdict)  # [2,512]

            name_list = []
            img_list = []
            img_names = tf.gfile.Glob(os.path.join(
                path, '*.jpg'))  # tf.gfile.Glob returns a list
            for file in img_names:
                if file != positive_img_names[0]:
                    print('%s' % (os.path.basename(file)))
                    img = prep(
                        rescale(io.imread(file) / 255.,
                                112. / 600.,
                                order=5,
                                multichannel=True))  #[1,3,112,112]
                    name_list.append(os.path.basename(file)[:-4])
                    img_list.append(img)

            feed_img = np.squeeze(np.array(img_list), axis=1)  #[?,3,112,112]
            fdict1 = {
                model_dic['image_input']: feed_img,
                model_dic['keep_prob']: 1.0,
                model_dic['is_train']: False,
                model_dic['positive_embedding']: pos_embedding_np[0]
            }
            last_conv_np, grad_conv_np, grad_input_np, loss_np = sess.run(
                [
                    model_dic['stage4_unit3_conv2'], model_dic['grad_conv'],
                    model_dic['grad_input'], model_dic['loss']
                ],
                feed_dict=fdict1)

            # add negative and positive embeddings
            csv_file_name = os.path.join(output_path, 'distance.csv')
            write_csv(name_list, loss_np * (-1), csv_file_name)

            #cam, norm_grad_input_0_1:[112,112]
            norm_grad_input = cacu_norm_grad(grad_input_np)
            cam, norm_grads_conv = calcu_cam(last_conv_np, grad_conv_np)

            cam_dir = os.path.join(output_path, 'cam')
            grad_dir = os.path.join(output_path, 'grad')

            # Filter with threshold
            find_adv_image(args, norm_grad_input, grad_dir, feed_img,
                           name_list, sess, pos_embedding_np[0], model_dic)
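
The prep helper is not included in this snippet; judging from the [1,3,112,112] shape comments, it presumably converts an HWC float image into a batched CHW array. A minimal sketch under that assumption (the body is a guess, not the project's code):

import numpy as np

def prep(img):
    # Hypothetical reconstruction: HWC float image -> [1, 3, H, W] batch.
    img = np.transpose(img, (2, 0, 1))  # HWC -> CHW
    return img[np.newaxis, ...].astype(np.float32)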
Code Example #2

import logging as log
import os
import re

from distutils.version import LooseVersion
from mo.utils.error import Error, FrameworkError
from mo.utils.utils import refer_to_faq_msg
from mo.utils.versions_checker import get_environment_setup

try:
    import tensorflow.compat.v1 as tf_v1
    # disable eager execution of TensorFlow 2 environment immediately
    tf_v1.disable_eager_execution()
    import tensorflow as tf
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
except ImportError:
    import tensorflow as tf_v1

from google.protobuf import text_format
from mo.graph.graph import fill_graph_with_nodes, Graph
from mo.utils.summarize_graph import summarize_graph


def freeze_checkpoints(graph_def: tf_v1.GraphDef, checkpoint_dir: str,
                       output_node_names: list):
    """
    Loads all the variables in a graph and stores them in a separate dictionary. Freezes output nodes in the graph
    :param graph_def: GraphDef object holding the network.
Code Example #3
File: train.py Project: YCL105/TEST
def main():  # main function of the training program
    if not os.path.exists(args.snapshot_dir):  # create the folder for model snapshots if it does not exist
        os.makedirs(args.snapshot_dir)
    if not os.path.exists(args.out_dir):  # create the folder for visualization output if it does not exist
        os.makedirs(args.out_dir)
    train_picture_list = glob.glob(os.path.join(args.train_picture_path,
                                                "*"))  # list of training input image paths
    tf.set_random_seed(args.random_seed)  # seed the random number generator

    tf.disable_eager_execution()  # needed because of the difference between TF 2.0 and 1.x

    train_picture = tf.placeholder(
        tf.float32,
        shape=[1, args.image_size, args.image_size, 3],
        name='train_picture')  # input training image
    train_label = tf.placeholder(
        tf.float32,
        shape=[1, args.image_size, args.image_size, 3],
        name='train_label')  # label matching the input training image

    gen_label = generator(image=train_picture,
                          gf_dim=64,
                          reuse=False,
                          name='generator')  # generator output
    dis_real = discriminator(image=train_picture,
                             targets=train_label,
                             df_dim=64,
                             reuse=False,
                             name="discriminator")  # discriminator output for the real label
    dis_fake = discriminator(image=train_picture,
                             targets=gen_label,
                             df_dim=64,
                             reuse=True,
                             name="discriminator")  # discriminator output for the generated (fake) label
    # original loss functions
    gen_loss_GAN = tf.reduce_mean(
        -tf.log(1 - dis_fake + EPS))  # GAN_loss part of the generator loss
    gen_loss_L1 = tf.reduce_mean(l1_loss(gen_label,
                                         train_label))  # L1_loss part of the generator loss

    # experimental loss functions
    #gen_loss_L2 = tf.reduce_mean(tf.square(tf.abs(gen_label - train_label + EPS)))  # L2_loss part of the generator loss
    #gen_loss_ssim = tf.reduce_mean(1 - tf.image.ssim(gen_label, train_label, max_val=255))  # ssim_loss part of the generator loss

    # original generator loss
    gen_loss = gen_loss_GAN * args.lamda_gan_weight + gen_loss_L1 * args.lamda_l1_weight  # total generator loss

    # experimental generator loss
    #gen_loss = gen_loss_GAN * args.lamda_gan_weight + gen_loss_L2 * args.lamda_l1_weight
    ## least-squares loss
    #gen_loss = tf.reduce_mean(tf.square(dis_fake+ EPS))  * args.lamda_gan_weight + gen_loss_l1 * args.lamda_l1_weight

    # original discriminator loss
    dis_loss = tf.reduce_mean(
        -(tf.log(1 - dis_real + EPS) + tf.log(dis_fake + EPS)))  # discriminator loss

    # experimental discriminator loss
    ## least-squares loss
    # dis_loss = tf.reduce_mean(tf.square(dis_fake+ EPS))

    gen_loss_sum = tf.summary.scalar("gen_loss", gen_loss)  # log the generator loss
    dis_loss_sum = tf.summary.scalar("dis_loss", dis_loss)  # log the discriminator loss

    summary_writer = tf.summary.FileWriter(
        args.log_dir, graph=tf.get_default_graph())  # summary writer

    g_vars = [v for v in tf.trainable_variables()
              if 'generator' in v.name]  # all trainable generator parameters
    d_vars = [
        v for v in tf.trainable_variables() if 'discriminator' in v.name
    ]  # all trainable discriminator parameters

    d_optim = tf.train.AdamOptimizer(args.base_lr, beta1=args.beta1)  # discriminator optimizer
    g_optim = tf.train.AdamOptimizer(args.base_lr, beta1=args.beta1)  # generator optimizer

    d_grads_and_vars = d_optim.compute_gradients(dis_loss,
                                                 var_list=d_vars)  # discriminator gradients
    d_train = d_optim.apply_gradients(d_grads_and_vars)  # apply discriminator updates
    g_grads_and_vars = g_optim.compute_gradients(gen_loss,
                                                 var_list=g_vars)  # generator gradients
    g_train = g_optim.apply_gradients(g_grads_and_vars)  # apply generator updates

    train_op = tf.group(d_train, g_train)  # train_op represents the parameter update ops
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True  # do not grab all GPU memory up front
    sess = tf.Session(config=config)  # create the session
    #init = tf.compat.v1.global_variables_initializer()  # variable initializer
    init = tf.global_variables_initializer()  # variable initializer

    sess.run(init)  # initialize all trainable variables

    # saver = tf.compat.v1.train.Saver(var_list=tf.compat.v1.global_variables(), max_to_keep=50)  # model saver
    saver = tf.train.Saver(var_list=tf.global_variables(),
                           max_to_keep=50)  # model saver

    counter = 0  # counter records the number of training steps

    for epoch in range(args.epoch):  # number of training epochs
        shuffle(train_picture_list)  # shuffle the input order at the start of each epoch
        for step in range(len(train_picture_list)):  # training steps within each epoch
            counter += 1
            picture_name, _ = os.path.splitext(
                os.path.basename(
                    train_picture_list[step]))  # input image name without path or extension
            # read one training image, its label, and their height and width
            picture_resize, label_resize, picture_height, picture_width = ImageReader(
                file_name=picture_name,
                picture_path=args.train_picture_path,
                label_path=args.train_label_path,
                picture_format=args.train_picture_format,
                label_format=args.train_label_format,
                size=args.image_size)
            batch_picture = np.expand_dims(np.array(picture_resize).astype(
                np.float32),
                                           axis=0)  # add batch dimension
            batch_label = np.expand_dims(np.array(label_resize).astype(
                np.float32),
                                         axis=0)  # add batch dimension
            feed_dict = {
                train_picture: batch_picture,
                train_label: batch_label
            }  # build the feed_dict
            gen_loss_value, dis_loss_value, _ = sess.run(
                [gen_loss, dis_loss, train_op],
                feed_dict=feed_dict)  # generator and discriminator loss for this step
            if counter % args.save_pred_every == 0:  # save the model every save_pred_every steps
                save(saver, sess, args.snapshot_dir, counter)
            if counter % args.summary_pred_every == 0:  # write summaries every summary_pred_every steps
                gen_loss_sum_value, discriminator_sum_value = sess.run(
                    [gen_loss_sum, dis_loss_sum], feed_dict=feed_dict)
                summary_writer.add_summary(gen_loss_sum_value, counter)
                summary_writer.add_summary(discriminator_sum_value, counter)
            if counter % args.write_pred_every == 0:  # write a visualization every write_pred_every steps
                gen_label_value = sess.run(gen_label,
                                           feed_dict=feed_dict)  # run the generator output
                write_image = get_write_picture(picture_resize,
                                                gen_label_value, label_resize,
                                                picture_height,
                                                picture_width)  # assemble the training visualization
                write_image_name = args.out_dir + "out" + str(
                    counter) + ".png"  # output path and name of the visualization
                cv2.imwrite(write_image_name, write_image)  # save the training visualization
            print(
                'epoch {:d} step {:d} \t gen_loss = {:.3f}, dis_loss = {:.3f}'.
                format(epoch, step, gen_loss_value, dis_loss_value))
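
The save helper called in the training loop above is not part of this snippet; a plausible sketch of it (an assumption, not the project's actual code):

def save(saver, sess, logdir, step):
    # Hypothetical reconstruction: write a checkpoint under logdir at the given step.
    checkpoint_path = os.path.join(logdir, 'model.ckpt')
    saver.save(sess, checkpoint_path, global_step=step)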
Code Example #4
    def __init__(self, board_width, board_height, num_ships, model_file=None):
        self.board_width = board_width
        self.board_height = board_height
        self.num_ships = num_ships
        self.num_input_dimension = self.num_ships + 1
        self.board_size = self.board_width * self.board_height
        self.hidden_units = self.board_size
        self.output_units = self.board_size
        self.type = tf.float32

        tf.reset_default_graph()
        tf.disable_eager_execution()
        # Define the tensorflow neural network
        # 1. Input:
        self.input_dimensions = tf.placeholder(
            self.type,
            shape=[1, self.num_input_dimension * board_height * board_width])
        self.input_dimensions_reshaped = tf.reshape(
            self.input_dimensions,
            [-1, board_height, board_width, self.num_input_dimension])
        # 2. Network layers
        self.layer1 = tf.layers.conv2d(inputs=self.input_dimensions_reshaped,
                                       filters=32,
                                       kernel_size=[3, 3],
                                       padding="same",
                                       activation=tf.nn.relu)
        self.layer2 = tf.layers.conv2d(inputs=self.layer1,
                                       filters=64,
                                       kernel_size=[3, 3],
                                       padding="same",
                                       activation=tf.nn.relu)
        self.layer3 = tf.layers.conv2d(inputs=self.layer2,
                                       filters=self.num_input_dimension,
                                       kernel_size=[1, 1],
                                       padding="same",
                                       activation=tf.nn.relu)
        self.layer3_reshaped = tf.reshape(
            self.layer3,
            [-1, board_height * board_width * self.num_input_dimension])

        # Alternative network structure, simpler but less effective than the CNN
        # self.layer1 = tf.layers.dense(inputs=self.input_dimensions, units=self.num_input_dimension * self.board_size, activation=tf.nn.relu)
        # self.layer2 = tf.layers.dense(inputs=self.layer1, units=self.num_input_dimension * self.board_size, activation=tf.nn.relu)
        self.logits = tf.layers.dense(inputs=self.layer3_reshaped,
                                      units=self.board_size)
        self.probabilities = tf.nn.softmax(self.logits)

        # Training step
        self.labels = tf.placeholder(tf.int64)
        self.learning_rate = tf.placeholder(self.type, shape=[])
        self.cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=self.logits, labels=self.labels)
        # Define loss
        self.loss = tf.reduce_sum(
            tf.multiply(self.learning_rate, self.cross_entropy))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=0.0005).minimize(
            self.loss)

        # Start TF session
        self.session = tf.Session()
        init = tf.global_variables_initializer()
        self.session.run(init)

        # For saving and restoring
        self.saver = tf.train.Saver()
        if model_file is not None:
            print('load model', model_file)
            self.restoreModel(model_file)
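
The restoreModel method called above is not shown; a minimal sketch of what it presumably does with the Saver defined in __init__ (an assumption, not the project's actual code):

    def restoreModel(self, model_file):
        # Hypothetical implementation: restore all saved variables into the session.
        self.saver.restore(self.session, model_file)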
Code Example #5
File: exercise04_kwak.py Project: hvkwak/CourseWorks
def get_network_graph(num_features,
                      num_classes,
                      num_layers,
                      activation_function,
                      learning_rate=0.1):

    n = Network()
    num_hidden_neurons = 20  # Neurons per hidden layer

    tf.disable_eager_execution()
    tf.reset_default_graph()

    # x and y of Network n are placeholders.
    # n.x: a placeholder; it accepts any number of rows.
    n.x = tf.placeholder(tf.float32, shape=[None, num_features], name="images")
    n.y_ = tf.placeholder(tf.int32, shape=[None], name="labels")

    current_layer = n.x

    # Add hidden layers.

    # First hidden layer
    # variables are in the scope of Layer1:
    with tf.variable_scope("Layer1"):

        # remember that the dimensions of the weight matrix and bias vector
        # are transposed.
        w = tf.get_variable("weights",
                            shape=[num_features, num_hidden_neurons])
        b = tf.get_variable("offsets", shape=[1, num_hidden_neurons])

        # w (weights) with shape (num_features, num_hidden_neurons) added.
        # b (biases) with shape (1, num_hidden_neurons) added.
        n.weights.append(w)
        n.biases.append(b)

        # mind the dimensions in tf.matmul and the activation function.
        current_layer = activation_function(tf.matmul(current_layer, w) + b)

    # Other hidden layers
    for i in range(2, num_layers + 1):
        with tf.variable_scope("Layer" + str(i)):
            w = tf.get_variable("weights",
                                shape=[num_hidden_neurons, num_hidden_neurons])
            b = tf.get_variable("offsets", shape=[1, num_hidden_neurons])
            n.weights.append(w)
            n.biases.append(b)
            current_layer = activation_function(
                tf.matmul(current_layer, w) + b)

    # Output layer
    with tf.variable_scope("LayerOutput"):
        w = tf.get_variable("weights", shape=[num_hidden_neurons, num_classes])
        b = tf.get_variable("offsets", shape=[1, num_classes])
        n.weights.append(w)
        n.biases.append(b)
        y_unscaled = tf.matmul(current_layer, w) + b

    correct_labels = n.y_
    predicted_labels = tf.argmax(y_unscaled, axis=1, output_type=tf.int32)

    # cast the boolean comparisons to float32.
    correct_prediction = tf.cast(tf.equal(correct_labels, predicted_labels),
                                 tf.float32)
    n.accuracy = tf.reduce_sum(correct_prediction) / tf.cast(
        tf.shape(correct_prediction)[0], tf.float32)

    # loss, train_step, variable initializer
    n.loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=y_unscaled,
                                                       labels=n.y_))
    n.train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        n.loss)
    n.init_op = tf.global_variables_initializer()

    # return the network.
    return n
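
A usage sketch for the returned network, with random NumPy data standing in for a real batch (the argument values are illustrative, not from the original exercise):

import numpy as np
import tensorflow.compat.v1 as tf

n = get_network_graph(num_features=784, num_classes=10, num_layers=3,
                      activation_function=tf.nn.relu)
x_batch = np.random.rand(32, 784).astype(np.float32)
y_batch = np.random.randint(0, 10, size=32)
with tf.Session() as sess:
    sess.run(n.init_op)
    # One gradient-descent step on the random batch.
    _, loss = sess.run([n.train_step, n.loss],
                       feed_dict={n.x: x_batch, n.y_: y_batch})
    print('loss:', loss)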
Code Example #6
File: loader.py Project: liubo-intel/openvino
def load_tf_graph_def(graph_file_name: str = "",
                      is_binary: bool = True,
                      checkpoint: str = "",
                      model_dir: str = "",
                      saved_model_tags: list = [],
                      meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load a model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$',
                                                      graph_file_name)):
        print(
            '[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
            'extension.\n'
            'It means that the model is not frozen.\n'
            'To load non frozen model to Model Optimizer run:'
            '\n\n1. For "*.ckpt" file:'
            '\n- if inference graph is in binary format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
            '\n- if inference graph is in text format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
            '--input_checkpoint "path/to/*.ckpt"'
            '\n\n2. For "*.meta" file:'
            '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(
                graph_def, graph_file_name,
                is_binary), variables_values, 'tf', None
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name,
                                               is_binary)
            outputs = get_output_node_names_list(graph_def,
                                                 user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def,
                                              checkpoint=checkpoint,
                                              output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(
                    graph_def=graph_def,
                    checkpoint_dir=checkpoint,
                    output_node_names=outputs)
            # we are sure that checkpoint is existing file or directory due to cli_parser configuration
            return graph_def, variables_values, 'tf', None
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(
                tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # Since version 2.2, TF can fail with an internal error while loading a graph from a .meta file.
            # It happens because some operations may have an _output_shapes attribute inconsistent with the value
            # calculated for the GraphDef. To avoid this problem we must delete `_output_shapes` attributes from operations
            for node in input_meta_graph_def.graph_def.node:
                if '_output_shapes' in node.attr:
                    del node.attr['_output_shapes']
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(
                    input_meta_graph_def.graph_def,
                    user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, input_meta_graph_def.graph_def, outputs)
                return graph_def, variables_values, 'tf', None
        if model_dir:
            # saved model directory
            try:
                env_setup = get_environment_setup("tf")
                # enable eager execution temporarily while TensorFlow 2 model is being loaded
                tf_v1.enable_eager_execution()

                try:
                    # Code to extract a Keras model.
                    # tf.keras.models.load_model throws TypeError, KeyError or IndexError
                    # for the TF 1.x SavedModel format when TF 1.x is installed
                    imported = tf.keras.models.load_model(model_dir,
                                                          compile=False)
                except:
                    imported = tf.saved_model.load(model_dir, saved_model_tags)  # pylint: disable=E1120

                # getting a signature by key throws KeyError for the TF 1.x SavedModel format when TF 2.x is installed
                concrete_func = imported.signatures[
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                # the aggressive_inlining parameter is needed to freeze the embeddings table of a Keras Embedding
                # operation; a model with an Embedding operation cannot be properly converted to IR without it
                if "tensorflow" in env_setup and env_setup[
                        "tensorflow"] >= LooseVersion("2.2.0"):
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func,
                        lower_control_flow=False,
                        aggressive_inlining=True)  # pylint: disable=E1123
                else:
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func, lower_control_flow=False)  # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution since next steps are executed with a graph in non-eager mode
                tf_v1.disable_eager_execution()

                input_names = []
                if hasattr(imported, 'inputs'):
                    # Extract tensor names order from Keras model
                    input_names = [tensor.name for tensor in imported.inputs]

                # After model freezing, output tensor names change and receive a "Func/PartitionedCall" prefix,
                # so output_names from saved_model cannot be used. Tensor names from the frozen graph are used
                # instead, as TF adds indexed Identity nodes to each output during freezing, and this indexing
                # is used for order alignment.
                output_names = [tensor.name for tensor in frozen_func.outputs]

                inputs_outputs_order = (input_names, output_names)

                return graph_def, variables_values, 'tf2', inputs_outputs_order
            except:
                # disable eager execution since TensorFlow 1 model is handled
                tf_v1.disable_eager_execution()
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [
                    tf_v1.saved_model.tag_constants.SERVING
                ]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(
                        sess, tags, model_dir)
                    outputs = get_output_node_names_list(
                        meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(
                        sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values, 'tf', None
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
Code Example #7
File: tf1_example.py Project: pinae/Superresolution
# -*- coding: utf-8 -*-
from tensorflow.compat.v1 import placeholder, Session, disable_eager_execution
from tensorflow import float32
disable_eager_execution()

a = placeholder(float32)
b = placeholder(float32)
c = placeholder(float32)
x = (a + b) / c
sess = Session()
result = sess.run([x], {a: 2, b: 3, c: 4})
print(result)  # [1.25]
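
For contrast, the same computation in native TensorFlow 2 needs neither placeholders nor a session; a minimal sketch assuming a TF 2.x installation:

import tensorflow as tf

@tf.function
def divide_sum(a, b, c):
    # Graph-compiled equivalent of (a + b) / c.
    return (a + b) / c

print(divide_sum(tf.constant(2.0), tf.constant(3.0), tf.constant(4.0)).numpy())  # 1.25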
Code Example #8
  if FLAGS.mode == 'eval':
    for ckpt in tf.train.checkpoints_iterator(
        run_config.model_dir, min_interval_secs=15):
      try:
        result = perform_evaluation(
            estimator=estimator,
            input_fn=data_lib.build_input_fn(builder, False),
            eval_steps=eval_steps,
            model=model,
            num_classes=num_classes,
            checkpoint_path=ckpt)
      except tf.errors.NotFoundError:
        continue
      if result['global_step'] >= train_steps:
        return
  else:
    estimator.train(
        data_lib.build_input_fn(builder, True), max_steps=train_steps)
    if FLAGS.mode == 'train_then_eval':
      perform_evaluation(
          estimator=estimator,
          input_fn=data_lib.build_input_fn(builder, False),
          eval_steps=eval_steps,
          model=model,
          num_classes=num_classes)


if __name__ == '__main__':
  tf.disable_eager_execution()  # Disable eager mode when running with TF2.
  app.run(main)
Code Example #9
def main(_):

    if FLAGS.strategy == 'horovod':
        import horovod.tensorflow as hvd  # pylint: disable=g-import-not-at-top
        logging.info('Use horovod with multi gpus')
        hvd.init()
        os.environ['CUDA_VISIBLE_DEVICES'] = str(hvd.local_rank())
    import tensorflow.compat.v1 as tf  # pylint: disable=g-import-not-at-top
    tf.enable_v2_tensorshape()
    tf.disable_eager_execution()

    if FLAGS.strategy == 'tpu':
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tpu_grpc_url = tpu_cluster_resolver.get_master()
        tf.Session.reset(tpu_grpc_url)
    else:
        tpu_cluster_resolver = None

    # Check data path
    if FLAGS.mode in (
            'train', 'train_and_eval') and FLAGS.training_file_pattern is None:
        raise RuntimeError(
            'You must specify --training_file_pattern for training.')
    if FLAGS.mode in ('eval', 'train_and_eval'):
        if FLAGS.validation_file_pattern is None:
            raise RuntimeError('You must specify --validation_file_pattern '
                               'for evaluation.')

    # Parse and override hparams
    config = hparams_config.get_detection_config(FLAGS.model_name)
    config.override(FLAGS.hparams)
    if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
        config.num_epochs = FLAGS.num_epochs

    # Parse image size in case it is in string format.
    config.image_size = utils.parse_image_size(config.image_size)

    # The following is for spatial partitioning. `features` has one tensor while
    # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
    # partition is performed on `features` and all partitionable tensors of
    # `labels`; see the partition logic below.
    # In the TPUEstimator context, `shard` and `replica` mean the same thing;
    # following the API, both terms are used here.
    if FLAGS.use_spatial_partition:
        # Checks input_partition_dims agrees with num_cores_per_replica.
        if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
            raise RuntimeError(
                '--num_cores_per_replica must be a product of array '
                'elements in --input_partition_dims.')

        labels_partition_dims = {
            'mean_num_positives': None,
            'source_ids': None,
            'groundtruth_data': None,
            'image_scales': None,
        }
        # The Input Partition Logic: We partition only the partition-able tensors.
        # Spatial partition requires that the to-be-partitioned tensors must have a
        # dimension that is a multiple of `partition_dims`. Depending on the
        # `partition_dims` and the `image_size` and the `max_level` in config, some
        # high-level anchor labels (i.e., `cls_targets` and `box_targets`) cannot
        # be partitioned. For example, when `partition_dims` is [1, 4, 2, 1], image
        # size is 1536, `max_level` is 9, `cls_targets_8` has a shape of
        # [batch_size, 6, 6, 9], which cannot be partitioned (6 % 4 != 0). In this
        # case, the level-8 and level-9 target tensors are not partition-able, and
        # the highest partition-able level is 7.
        feat_sizes = utils.get_feat_sizes(config.get('image_size'),
                                          config.get('max_level'))
        for level in range(config.get('min_level'),
                           config.get('max_level') + 1):

            def _can_partition(spatial_dim):
                partitionable_index = np.where(
                    spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
                return len(partitionable_index[0]) == len(
                    FLAGS.input_partition_dims)

            spatial_dim = feat_sizes[level]
            if _can_partition(spatial_dim['height']) and _can_partition(
                    spatial_dim['width']):
                labels_partition_dims['box_targets_%d' %
                                      level] = FLAGS.input_partition_dims
                labels_partition_dims['cls_targets_%d' %
                                      level] = FLAGS.input_partition_dims
            else:
                labels_partition_dims['box_targets_%d' % level] = None
                labels_partition_dims['cls_targets_%d' % level] = None
        num_cores_per_replica = FLAGS.num_cores_per_replica
        input_partition_dims = [
            FLAGS.input_partition_dims, labels_partition_dims
        ]
        num_shards = FLAGS.num_cores // num_cores_per_replica
    else:
        num_cores_per_replica = None
        input_partition_dims = None
        num_shards = FLAGS.num_cores

    params = dict(config.as_dict(),
                  model_name=FLAGS.model_name,
                  iterations_per_loop=FLAGS.iterations_per_loop,
                  model_dir=FLAGS.model_dir,
                  num_shards=num_shards,
                  num_examples_per_epoch=FLAGS.num_examples_per_epoch,
                  strategy=FLAGS.strategy,
                  backbone_ckpt=FLAGS.backbone_ckpt,
                  ckpt=FLAGS.ckpt,
                  val_json_file=FLAGS.val_json_file,
                  testdev_dir=FLAGS.testdev_dir,
                  mode=FLAGS.mode)
    config_proto = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)
    if FLAGS.use_xla and FLAGS.strategy != 'tpu':
        config_proto.graph_options.optimizer_options.global_jit_level = (
            tf.OptimizerOptions.ON_1)
        config_proto.gpu_options.allow_growth = True

    tpu_config = tf.estimator.tpu.TPUConfig(
        FLAGS.iterations_per_loop,
        num_shards=num_shards,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig.
        PER_HOST_V2)

    if FLAGS.strategy == 'horovod':
        model_dir = FLAGS.model_dir if hvd.rank() == 0 else None
    else:
        model_dir = FLAGS.model_dir

    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        evaluation_master=FLAGS.eval_master,
        model_dir=model_dir,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        tpu_config=tpu_config,
        tf_random_seed=FLAGS.tf_random_seed,
    )

    model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
    max_instances_per_image = config.max_instances_per_image
    use_tpu = (FLAGS.strategy == 'tpu')

    # TPU Estimator
    logging.info(params)
    if FLAGS.mode == 'train':
        train_estimator = tf.estimator.tpu.TPUEstimator(
            model_fn=model_fn_instance,
            use_tpu=use_tpu,
            train_batch_size=FLAGS.train_batch_size,
            config=run_config,
            params=params)
        train_estimator.train(
            input_fn=dataloader.InputReader(
                FLAGS.training_file_pattern,
                is_training=True,
                use_fake_data=FLAGS.use_fake_data,
                max_instances_per_image=max_instances_per_image),
            max_steps=int((config.num_epochs * FLAGS.num_examples_per_epoch) /
                          FLAGS.train_batch_size))

        if FLAGS.eval_after_training:
            # Run evaluation after training finishes.
            eval_params = dict(
                params,
                strategy=FLAGS.strategy,
                input_rand_hflip=False,
                is_training_bn=False,
                mixed_precision=None,
            )
            eval_estimator = tf.estimator.tpu.TPUEstimator(
                model_fn=model_fn_instance,
                use_tpu=use_tpu,
                train_batch_size=FLAGS.train_batch_size,
                eval_batch_size=FLAGS.eval_batch_size,
                config=run_config,
                params=eval_params)
            eval_results = eval_estimator.evaluate(
                input_fn=dataloader.InputReader(
                    FLAGS.validation_file_pattern,
                    is_training=False,
                    max_instances_per_image=max_instances_per_image),
                steps=FLAGS.eval_samples // FLAGS.eval_batch_size,
                name=FLAGS.eval_name)
            logging.info('Eval results: %s', eval_results)
            ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
            utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

    elif FLAGS.mode == 'eval':
        # Override the default options: disable randomization in the input pipeline
        # and don't run on the TPU.
        eval_params = dict(
            params,
            strategy=FLAGS.strategy,
            input_rand_hflip=False,
            is_training_bn=False,
            mixed_precision=None,
        )

        eval_estimator = tf.estimator.tpu.TPUEstimator(
            model_fn=model_fn_instance,
            use_tpu=use_tpu,
            train_batch_size=FLAGS.train_batch_size,
            eval_batch_size=FLAGS.eval_batch_size,
            config=run_config,
            params=eval_params)

        def terminate_eval():
            logging.info('Terminating eval after %d seconds of no checkpoints',
                         FLAGS.eval_timeout)
            return True

        # Run evaluation when there's a new checkpoint
        for ckpt in tf.train.checkpoints_iterator(
                FLAGS.model_dir,
                min_interval_secs=FLAGS.min_eval_interval,
                timeout=FLAGS.eval_timeout,
                timeout_fn=terminate_eval):

            logging.info('Starting to evaluate.')
            try:
                eval_results = eval_estimator.evaluate(
                    input_fn=dataloader.InputReader(
                        FLAGS.validation_file_pattern,
                        is_training=False,
                        max_instances_per_image=max_instances_per_image),
                    steps=FLAGS.eval_samples // FLAGS.eval_batch_size,
                    name=FLAGS.eval_name)
                logging.info('Eval results: %s', eval_results)

                # Terminate eval job when final checkpoint is reached.
                try:
                    current_step = int(os.path.basename(ckpt).split('-')[1])
                except IndexError:
                    logging.info('%s has no global step info: stop!', ckpt)
                    break

                utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
                total_step = int(
                    (config.num_epochs * FLAGS.num_examples_per_epoch) /
                    FLAGS.train_batch_size)
                if current_step >= total_step:
                    logging.info('Evaluation finished after training step %d',
                                 current_step)
                    break

            except tf.errors.NotFoundError:
                # Since the coordinator is on a different job than the TPU worker,
                # sometimes the TPU worker does not finish initializing until long after
                # the CPU job tells it to start evaluating. In this case, the checkpoint
                # file could have been deleted already.
                logging.info(
                    'Checkpoint %s no longer exists, skipping checkpoint',
                    ckpt)

    elif FLAGS.mode == 'train_and_eval':
        for cycle in range(config.num_epochs):
            logging.info('Starting training cycle, epoch: %d.', cycle)
            train_estimator = tf.estimator.tpu.TPUEstimator(
                model_fn=model_fn_instance,
                use_tpu=use_tpu,
                train_batch_size=FLAGS.train_batch_size,
                config=run_config,
                params=params)
            train_estimator.train(input_fn=dataloader.InputReader(
                FLAGS.training_file_pattern,
                is_training=True,
                use_fake_data=FLAGS.use_fake_data,
                max_instances_per_image=max_instances_per_image),
                                  steps=int(FLAGS.num_examples_per_epoch /
                                            FLAGS.train_batch_size))

            logging.info('Starting evaluation cycle, epoch: %d.', cycle)
            # Run evaluation after every epoch.
            eval_params = dict(
                params,
                strategy=FLAGS.strategy,
                input_rand_hflip=False,
                is_training_bn=False,
            )

            eval_estimator = tf.estimator.tpu.TPUEstimator(
                model_fn=model_fn_instance,
                use_tpu=use_tpu,
                train_batch_size=FLAGS.train_batch_size,
                eval_batch_size=FLAGS.eval_batch_size,
                config=run_config,
                params=eval_params)
            eval_results = eval_estimator.evaluate(
                input_fn=dataloader.InputReader(
                    FLAGS.validation_file_pattern,
                    is_training=False,
                    max_instances_per_image=max_instances_per_image),
                steps=FLAGS.eval_samples // FLAGS.eval_batch_size,
                name=FLAGS.eval_name)
            logging.info('Evaluation results: %s', eval_results)
            ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
            utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

    else:
        logging.info('Mode not found.')
Code Example #10
def main(argv=None):
    # if tf.gfile.Exists(FLAGS.save_dir):
    #     tf.gfile.DeleteRecursively(FLAGS.save_dir)
    # tf.gfile.MakeDirs(FLAGS.save_dir)
    # if tf.gfile.Exists(FLAGS.gen_frm_dir):
    #     tf.gfile.DeleteRecursively(FLAGS.gen_frm_dir)
    # tf.gfile.MakeDirs(FLAGS.gen_frm_dir)
    # if os.path.exists(FLAGS.save_dir):
    #    shutil.rmtree(FLAGS.save_dir)
    # os.mkdir(FLAGS.save_dir)
    # if os.path.exists(FLAGS.gen_frm_dir):
    #    shutil.rmtree(FLAGS.gen_frm_dir)
    # os.mkdir(FLAGS.gen_frm_dir)

    tf.disable_eager_execution()  # added, otherwise an error occurs

    # load data
    train_input_handle, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name, FLAGS.train_data_paths, FLAGS.valid_data_paths,
        FLAGS.batch_size, FLAGS.img_width)

    #exit(0)  # this exit was used for building the auto.py dataset maker

    print("Initializing models")
    model = Model()
    lr = FLAGS.lr

    delta = 0.00002
    base = 0.99998
    eta = 1

    for itr in xrange(1, FLAGS.max_iterations + 1):
        if train_input_handle.no_batch_left():
            train_input_handle.begin(do_shuffle=True)
        ims = train_input_handle.get_batch()
        ims = preprocess.reshape_patch(ims, FLAGS.patch_size)

        if itr < 50000:
            eta -= delta
        else:
            eta = 0.0
        random_flip = np.random.random_sample(
            (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1))
        true_token = (random_flip < eta)
        #true_token = (random_flip < pow(base,itr))
        ones = np.ones((int(FLAGS.img_height / FLAGS.patch_size),
                        int(FLAGS.img_width / FLAGS.patch_size),
                        FLAGS.patch_size**2 * FLAGS.img_channel))
        zeros = np.zeros((int(FLAGS.img_height / FLAGS.patch_size),
                          int(FLAGS.img_width / FLAGS.patch_size),
                          FLAGS.patch_size**2 * FLAGS.img_channel))
        mask_true = []
        for i in xrange(FLAGS.batch_size):
            for j in xrange(FLAGS.seq_length - FLAGS.input_length - 1):
                if true_token[i, j]:
                    mask_true.append(ones)
                else:
                    mask_true.append(zeros)
        mask_true = np.array(mask_true)
        mask_true = np.reshape(
            mask_true,
            (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
             int(FLAGS.img_height / FLAGS.patch_size),
             int(FLAGS.img_width / FLAGS.patch_size),
             FLAGS.patch_size**2 * FLAGS.img_channel))

        print("Learning rate: " + str(lr))
        cost = model.train(ims, lr, mask_true)
        if itr == 32000 or itr == 52000:
            lr /= 10

        if FLAGS.reverse_input:
            ims_rev = ims[:, ::-1]
            cost += model.train(ims_rev, lr, mask_true)
            cost = cost / 2

        if itr % FLAGS.display_interval == 0:
            print('itr: ' + str(itr))
            print('training loss: ' + str(cost))

        if itr % FLAGS.test_interval == 0:
            print('test...')
            test_input_handle.begin(do_shuffle=False)
            res_path = os.path.join(FLAGS.gen_frm_dir, str(itr))
            os.mkdir(res_path)
            avg_mse = 0
            batch_id = 0
            img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                img_mse.append(0)
                ssim.append(0)
                psnr.append(0)
                fmae.append(0)
                sharp.append(0)
            mask_true = np.zeros(
                (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
                 int(FLAGS.img_height / FLAGS.patch_size),
                 int(FLAGS.img_width / FLAGS.patch_size),
                 FLAGS.patch_size**2 * FLAGS.img_channel))
            while not test_input_handle.no_batch_left():
                batch_id = batch_id + 1
                test_ims = test_input_handle.get_batch()
                test_dat = preprocess.reshape_patch(test_ims, FLAGS.patch_size)
                img_gen = model.test(test_dat, mask_true)

                # concat outputs of different gpus along batch
                img_gen = np.concatenate(img_gen)
                img_gen = preprocess.reshape_patch_back(
                    img_gen, FLAGS.patch_size)
                # MSE per frame
                for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                    x = test_ims[:, i + FLAGS.input_length, :, :, 0]
                    gx = img_gen[:, i, :, :, 0]
                    fmae[i] += metrics.batch_mae_frame_float(gx, x)
                    gx = np.maximum(gx, 0)
                    gx = np.minimum(gx, 1)
                    mse = np.square(x - gx).sum()
                    img_mse[i] += mse
                    avg_mse += mse

                    real_frm = np.uint8(x * 255)
                    pred_frm = np.uint8(gx * 255)
                    psnr[i] += metrics.batch_psnr(pred_frm, real_frm)
                    for b in xrange(FLAGS.batch_size):
                        sharp[i] += np.max(
                            cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
                        score, _ = compare_ssim(pred_frm[b],
                                                real_frm[b],
                                                full=True)
                        ssim[i] += score

                # save prediction examples
                if batch_id <= 10:
                    path = os.path.join(res_path, str(batch_id))
                    os.mkdir(path)
                    for i in xrange(FLAGS.seq_length):
                        name = 'gt' + str(i + 1) + '.png'
                        file_name = os.path.join(path, name)
                        img_gt = np.uint8(test_ims[0, i, :, :, :] * 255)
                        cv2.imwrite(file_name, img_gt)
                    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                        name = 'pd' + str(i + 1 + FLAGS.input_length) + '.png'
                        file_name = os.path.join(path, name)
                        img_pd = img_gen[0, i, :, :, :]
                        img_pd = np.maximum(img_pd, 0)
                        img_pd = np.minimum(img_pd, 1)
                        img_pd = np.uint8(img_pd * 255)
                        cv2.imwrite(file_name, img_pd)
                test_input_handle.next()
            avg_mse = avg_mse / (batch_id * FLAGS.batch_size)
            print('mse per seq: ' + str(avg_mse))
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                print(img_mse[i] / (batch_id * FLAGS.batch_size))
            psnr = np.asarray(psnr, dtype=np.float32) / batch_id
            fmae = np.asarray(fmae, dtype=np.float32) / batch_id
            ssim = np.asarray(ssim,
                              dtype=np.float32) / (FLAGS.batch_size * batch_id)
            sharp = np.asarray(
                sharp, dtype=np.float32) / (FLAGS.batch_size * batch_id)
            print('psnr per frame: ' + str(np.mean(psnr)))
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                print(psnr[i])
            print('fmae per frame: ' + str(np.mean(fmae)))
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                print(fmae[i])
            print('ssim per frame: ' + str(np.mean(ssim)))
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                print(ssim[i])
            print('sharpness per frame: ' + str(np.mean(sharp)))
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                print(sharp[i])

        if itr % FLAGS.snapshot_interval == 0:
            model.save(itr)

        train_input_handle.next()
Code Example #11
import tensorflow.compat.v1 as tensorflow

tensorflow.disable_eager_execution()

config = tensorflow.ConfigProto()
config.gpu_options.allow_growth = True

import random
import numpy

import pickle

from Dataset import Dataset
from Embeddings.TransE import TransE
from Embeddings.ComplEx import ComplEx
from Embeddings.TuckER import TuckER

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-d', '--dataset', type=str, default="NELL995")
parser.add_argument('-t',
                    '--task',
                    type=str,
                    default="concept_agentbelongstoorganization")
parser.add_argument('-x', '--embedding-method', type=str, default="TransE")
parser.add_argument('-e', '--embedding-size', type=int, default=100)
parser.add_argument('-m', '--margin', type=float, default=1.0)
parser.add_argument('-r', '--learning-rate', type=float, default=1e-3)
parser.add_argument('-b', '--batch-size', type=int, default=1024)
parser.add_argument('-g', '--sampling-type', type=str, default='bernoulli')
Code Example #12
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
import re
import sys
import tarfile

from six.moves import urllib
# modified to work with TF 2.0
import tensorflow.compat.v1 as tf

# running 1.x code on TensorFlow 2.0 otherwise raises:
# "AttributeError: module 'tensorflow' has no attribute 'placeholder'"
tf.disable_eager_execution()

import cifar10_input

FLAGS = tf.app.flags.FLAGS

# Basic model parameters.
tf.app.flags.DEFINE_integer('batch_size', 128,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_string('data_dir', 'cifar10_data',
                           """Path to the CIFAR-10 data directory.""")
tf.app.flags.DEFINE_boolean('use_fp16', False,
                            """Train the model using fp16.""")

# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = cifar10_input.IMAGE_SIZE
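
Flags defined this way are parsed when tf.app.run dispatches to main; a minimal hedged usage sketch (the main body here is illustrative, not from the original file):

def main(argv=None):
    # FLAGS values are available once tf.app.run has parsed the command line.
    print('batch size:', FLAGS.batch_size, 'data dir:', FLAGS.data_dir)

if __name__ == '__main__':
    tf.app.run()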
Code Example #13
File: test-front.py Project: JefPlochaet/MasterProef
def main(argv=None):

    tf.disable_eager_execution()  # added, otherwise an error occurs

    # load data
    _, test_input_handle = datasets_factory.data_provider(
        FLAGS.dataset_name, FLAGS.train_data_paths, FLAGS.test_data_paths,
        FLAGS.batch_size, FLAGS.img_width)

    print("Initializing models")
    model = Model()
    lr = FLAGS.lr

    print('test...')
    test_input_handle.begin(do_shuffle=False)
    res_path = os.path.join(FLAGS.gen_frm_dir, 'test')
    os.mkdir(res_path)
    avg_mse = 0
    batch_id = 0
    img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []
    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
        img_mse.append(0)
        ssim.append(0)
        psnr.append(0)
        fmae.append(0)
        sharp.append(0)
    mask_true = np.zeros(
        (FLAGS.batch_size, FLAGS.seq_length - FLAGS.input_length - 1,
         int(FLAGS.img_height / FLAGS.patch_size),
         int(FLAGS.img_width / FLAGS.patch_size),
         FLAGS.patch_size**2 * FLAGS.img_channel))
    while not test_input_handle.no_batch_left():
        batch_id = batch_id + 1
        test_ims = test_input_handle.get_batch()
        test_dat = preprocess.reshape_patch(test_ims, FLAGS.patch_size)
        img_gen = model.test(test_dat, mask_true)

        # concat outputs of different gpus along batch
        img_gen = np.concatenate(img_gen)
        img_gen = preprocess.reshape_patch_back(img_gen, FLAGS.patch_size)
        # MSE per frame
        for i in xrange(FLAGS.seq_length - FLAGS.input_length):
            x = test_ims[:, i + FLAGS.input_length, :, :, 0]
            gx = img_gen[:, i, :, :, 0]
            fmae[i] += metrics.batch_mae_frame_float(gx, x)
            gx = np.maximum(gx, 0)
            gx = np.minimum(gx, 1)
            mse = np.square(x - gx).sum()
            img_mse[i] += mse
            avg_mse += mse

            real_frm = np.uint8(x * 255)
            pred_frm = np.uint8(gx * 255)
            psnr[i] += metrics.batch_psnr(pred_frm, real_frm)
            for b in xrange(FLAGS.batch_size):
                sharp[i] += np.max(
                    cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
                score, _ = compare_ssim(pred_frm[b], real_frm[b], full=True)
                ssim[i] += score

        # save prediction examples
        if batch_id <= 10:
            path = os.path.join(res_path, str(batch_id))
            os.mkdir(path)
            for i in xrange(FLAGS.seq_length):
                name = 'gt' + str(i + 1) + '.png'
                file_name = os.path.join(path, name)
                img_gt = np.uint8(test_ims[0, i, :, :, :] * 255)
                cv2.imwrite(file_name, img_gt)
            for i in xrange(FLAGS.seq_length - FLAGS.input_length):
                name = 'pd' + str(i + 1 + FLAGS.input_length) + '.png'
                file_name = os.path.join(path, name)
                img_pd = img_gen[0, i, :, :, :]
                img_pd = np.maximum(img_pd, 0)
                img_pd = np.minimum(img_pd, 1)
                img_pd = np.uint8(img_pd * 255)
                cv2.imwrite(file_name, img_pd)
        test_input_handle.next()
    avg_mse = avg_mse / (batch_id * FLAGS.batch_size)
    print('mse per seq: ' + str(avg_mse))
    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
        print(img_mse[i] / (batch_id * FLAGS.batch_size))
    psnr = np.asarray(psnr, dtype=np.float32) / batch_id
    fmae = np.asarray(fmae, dtype=np.float32) / batch_id
    ssim = np.asarray(ssim, dtype=np.float32) / (FLAGS.batch_size * batch_id)
    sharp = np.asarray(sharp, dtype=np.float32) / (FLAGS.batch_size * batch_id)
    print('psnr per frame: ' + str(np.mean(psnr)))
    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
        print(psnr[i])
    print('fmae per frame: ' + str(np.mean(fmae)))
    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
        print(fmae[i])
    print('ssim per frame: ' + str(np.mean(ssim)))
    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
        print(ssim[i])
    print('sharpness per frame: ' + str(np.mean(sharp)))
    for i in xrange(FLAGS.seq_length - FLAGS.input_length):
        print(sharp[i])
Code Example #14
 def __init__(self, log_dir):
     """Create a summary writer logging to log_dir."""
     tf.disable_eager_execution()
     self.writer = tf.summary.FileWriter(log_dir)
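
A writer like this is typically paired with hand-built Summary protos; a sketch of a companion scalar-logging method (assumed, not part of the original class):

 def log_scalar(self, tag, value, step):
     """Log a scalar value to TensorBoard (hypothetical companion method)."""
     summary = tf.Summary(value=[tf.Summary.Value(tag=tag, simple_value=value)])
     self.writer.add_summary(summary, step)
     self.writer.flush()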
Code Example #15
def main(argv):
    del argv  # unused.
    tf.reset_default_graph()
    tf.disable_eager_execution()
    # set random seed.
    tf.set_random_seed(FLAGS.tf_rand_seed)
    np.random.seed(FLAGS.np_rand_seed)

    dataset_name = FLAGS.dataset_name
    n_labels = num_labels[dataset_name]
    num_train = num_train_dict[dataset_name]
    num_test = num_test_dict[dataset_name]

    matching = FLAGS.matching

    # load training and test data.
    print('--- Loading {} ---'.format(dataset_name))
    # print('loading dataset')
    x_train, y_train, x_test, y_test = get_data(dataset_name)

    # define the model.
    model_name = FLAGS.model_name
    if model_name == 'cnn':
        print('--- Building CNN model ---')
        model = models.CnnModel(n_labels)
    elif model_name == 'vgg':
        print('--- Building VGG model ---')
        model = models.VggModel(n_labels)
    else:
        raise NotImplementedError

    # num of batches to eval test data.
    eval_batch_size = FLAGS.eval_batch_size
    num_eval_batch = int(num_test / eval_batch_size)
    # num of batches to eval train data.
    num_eval_train_batch = int(num_train / eval_batch_size)
    batch_size = FLAGS.batch_size
    num_steps_per_epoch = int(num_train / batch_size)
    training_epochs = FLAGS.training_epochs

    # define training operations.
    global_step = tf.train.get_or_create_global_step()
    trainable_vars = tf.trainable_variables()
    grads = tf.gradients(model.total_loss, trainable_vars)
    learning_rate = FLAGS.learning_rate
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        model.total_loss, global_step=global_step)

    # define matching operations
    if matching != 'none':
        l_rate_match = get_matching_lr(matching, learning_rate, batch_size)
        trainable_vars_ph_0 = []
        trainable_vars_ph_1 = []
        for var in trainable_vars:
            trainable_vars_ph_0.append(
                tf.placeholder(tf.float32, shape=var.shape))
            trainable_vars_ph_1.append(
                tf.placeholder(tf.float32, shape=var.shape))
        matching_step = []
        for var, ph_0, ph_1 in zip(trainable_vars, trainable_vars_ph_0,
                                   trainable_vars_ph_1):
            matching_step.append(
                tf.assign_add(var, l_rate_match * (ph_0 - ph_1)))

    # checkpoint saver
    saver = tf.train.Saver(max_to_keep=1)
    checkpoint_dir = FLAGS.checkpoint_dir

    # start training.
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        # First, if checkpoint_dir does not exist, create a folder.
        if not tf.gfile.IsDirectory(checkpoint_dir):
            # checkpoint directory does not exist; create directory.
            tf.gfile.MakeDirs(checkpoint_dir)

        epoch = 0
        train_acc_record = []
        train_loss_record = []
        test_acc_record = []
        test_loss_record = []

        while epoch < training_epochs:
            # compute train/test acc/loss and log, add to records.
            # compute test acc and loss on whole dataset.
            test_acc = 0.0
            test_loss = 0.0
            for b_id in range(num_eval_batch):
                data_dict_eval = {
                    model.x_input:
                    x_test[b_id * eval_batch_size:(b_id + 1) * eval_batch_size,
                           ...],
                    model.y_input:
                    y_test[b_id * eval_batch_size:(b_id + 1) * eval_batch_size]
                }
                test_acc_batch, test_loss_batch = sess.run(
                    [model.accuracy, model.total_loss],
                    feed_dict=data_dict_eval)
                test_acc += test_acc_batch
                test_loss += test_loss_batch
            test_acc /= float(num_eval_batch)
            test_loss /= float(num_eval_batch)
            test_acc_record.append(test_acc)
            test_loss_record.append(test_loss)

            # compute train acc, loss, and full gradient on whole dataset.
            train_acc = 0.0
            train_loss = 0.0
            for b_id in range(num_eval_train_batch):
                data_dict_eval_train = {
                    model.x_input:
                    x_train[b_id * eval_batch_size:(b_id + 1) *
                            eval_batch_size, ...],
                    model.y_input:
                    y_train[b_id * eval_batch_size:(b_id + 1) *
                            eval_batch_size]
                }
                train_acc_batch, train_loss_batch = sess.run(
                    [model.accuracy, model.total_loss],
                    feed_dict=data_dict_eval_train)
                train_acc += train_acc_batch
                train_loss += train_loss_batch
            train_acc /= float(num_eval_train_batch)
            train_loss /= float(num_eval_train_batch)
            train_acc_record.append(train_acc)
            train_loss_record.append(train_loss)
            print('--- epoch {}, train acc {:.4f}, test acc {:.4f} ---'.format(
                epoch, train_acc, test_acc))

            if (epoch + 1) % FLAGS.num_checkpoint_epochs == 0:
                # write a checkpoint.
                print('--- Saving checkpoint ---')
                # save checkpoints.
                saver.save(sess,
                           os.path.join(checkpoint_dir, 'model.ckpt'),
                           global_step=global_step)
                # save training and test records.
                print('--- Saving records ---')
                # save train/test records.
                np.save(checkpoint_dir + '/train_acc.npy', train_acc_record)
                np.save(checkpoint_dir + '/train_loss.npy', train_loss_record)
                np.save(checkpoint_dir + '/test_acc.npy', test_acc_record)
                np.save(checkpoint_dir + '/test_loss.npy', test_loss_record)

            # Actual training step
            for _ in range(num_steps_per_epoch):
                # sample a batch without replacement
                selected_index = np.random.choice(num_train,
                                                  size=batch_size,
                                                  replace=False)
                x_batch = x_train[selected_index, ...]
                x_aug = data_augment(x_batch)
                y_batch = y_train[selected_index]
                data_dict = {model.x_input: x_aug, model.y_input: y_batch}
                sess.run(train_step, feed_dict=data_dict)

                # If using matching schemes, do these after the gradient descent step.
                if matching == 'lr_matching':
                    selected_index_0 = np.random.choice(num_train,
                                                        size=batch_size,
                                                        replace=True)
                    selected_index_1 = np.random.choice(num_train,
                                                        size=batch_size,
                                                        replace=True)
                elif matching == 'batch_matching':
                    selected_index_0 = np.random.choice(
                        num_train,
                        size=FLAGS.matching_batch_size,
                        replace=True)
                    selected_index_1 = np.random.choice(
                        num_train,
                        size=FLAGS.matching_batch_size,
                        replace=True)
                if matching != 'none':
                    x_batch_0 = x_train[selected_index_0, ...]
                    x_batch_1 = x_train[selected_index_1, ...]
                    x_aug_0 = data_augment(x_batch_0)
                    x_aug_1 = data_augment(x_batch_1)
                    y_batch_0 = y_train[selected_index_0]
                    y_batch_1 = y_train[selected_index_1]
                    data_dict_0 = {
                        model.x_input: x_aug_0,
                        model.y_input: y_batch_0
                    }
                    data_dict_1 = {
                        model.x_input: x_aug_1,
                        model.y_input: y_batch_1
                    }

                    # compute gradients and difference
                    grads_val_0 = sess.run(grads, feed_dict=data_dict_0)
                    grads_val_1 = sess.run(grads, feed_dict=data_dict_1)
                    # add to variables
                    grads_dict = {}
                    for ph_0, ph_1, array_0, array_1 in zip(
                            trainable_vars_ph_0, trainable_vars_ph_1,
                            grads_val_0, grads_val_1):
                        grads_dict[ph_0] = array_0
                        grads_dict[ph_1] = array_1
                    sess.run(matching_step, feed_dict=grads_dict)

            # This epoch finished, update the epoch counter.
            epoch += 1
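To make the matching update above concrete, a toy numpy sketch of what each `matching_step` op does (variable names are placeholders):

import numpy as np

l_rate_match = 0.1
var = np.array([1.0, 2.0])            # a trainable variable
grads_batch_0 = np.array([0.5, 0.1])  # gradient on one independent batch
grads_batch_1 = np.array([0.3, 0.4])  # gradient on another independent batch
# tf.assign_add(var, l_rate_match * (ph_0 - ph_1)) amounts to:
var += l_rate_match * (grads_batch_0 - grads_batch_1)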
Code Example #16
def create_client_batches(clients: List[ReptileClient],
                          batch_size: int) -> List[List[ReptileClient]]:
    if batch_size == -1:
        client_batches = [clients]
    else:
        client_batches = [
            clients[i:i + batch_size]
            for i in range(0, len(clients), batch_size)
        ]
    return client_batches
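A quick usage sketch of the batching logic above; the slicing works on any list, so plain integers stand in for `ReptileClient` objects here:

clients = list(range(10))                  # stand-ins for ReptileClient objects
print(create_client_batches(clients, 3))   # [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
print(create_client_batches(clients, -1))  # a single batch holding all clients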

    num_clients_train = 10000
    num_clients_test = 1000
    num_classes_per_client = 5
    num_shots_per_class = 5

    eval_iters = 10

    reptile_args = ReptileTrainingArgs(model=OmniglotModel,
                                       inner_optimizer=optim.Adam,
                                       inner_learning_rate=0.001,
                                       num_inner_steps=5,
                                       num_inner_steps_eval=50,
                                       log_every_n_steps=3,
                                       inner_batch_size=10,
                                       meta_batch_size=5,
                                       meta_learning_rate_initial=1,
                                       meta_learning_rate_final=0,
                                       num_meta_steps=3000)
    experiment_logger = create_tensorboard_logger(
        context.name, "dataloading_ours;models_ours")

    # Load and prepare Omniglot data
    data_dir = REPO_ROOT / 'data' / 'omniglot'

    #######
    tf.disable_eager_execution()
    #######

    omniglot_train_clients, omniglot_test_clients = load_omniglot_datasets(
        str(data_dir.absolute()),
        num_clients_train=num_clients_train,
        num_clients_test=num_clients_test,
        num_classes_per_client=num_classes_per_client,
        num_shots_per_class=num_shots_per_class,
        inner_batch_size=reptile_args.inner_batch_size,
        random_seed=RANDOM_SEED)

    # Prepare ModelArgs for task training
    inner_optimizer_args = OptimizerArgs(
        optimizer_class=reptile_args.inner_optimizer,
        lr=reptile_args.inner_learning_rate,
        betas=(0, 0.999))
    inner_model_args = ModelArgs(reptile_args.model,
                                 inner_optimizer_args,
                                 num_classes=num_classes_per_client)
    dummy_optimizer_args = OptimizerArgs(optimizer_class=optim.SGD)
    meta_model_args = ModelArgs(reptile_args.model,
                                dummy_optimizer_args,
                                num_classes=num_classes_per_client)
    """
    # Set up clients
    # Since we are doing meta-learning, we need separate sets of training and
    # test clients
    train_clients = initialize_reptile_clients(context, train_datasets)
    test_clients = initialize_reptile_clients(context, test_datasets)

    # Set up server
    server = ReptileServer(
        participant_name='initial_server',
        model_args=context.meta_model_args,
        context=context,
        initial_model_state=initial_model_state
    )"""

    torch_model = OmniglotModel(num_classes=num_classes_per_client)
    torch_optimizer = inner_optimizer_args.optimizer_class(
        torch_model.parameters(), **inner_optimizer_args.optimizer_kwargs)

    reptile = Reptile(model=torch_model,
                      optimizer=torch_optimizer,
                      inner_iterations=reptile_args.num_inner_steps,
                      inner_iterations_eval=reptile_args.num_inner_steps_eval)

    for i in range(reptile_args.num_meta_steps):
        frac_done = i / reptile_args.num_meta_steps
        cur_meta_step_size = frac_done * reptile_args.meta_learning_rate_final + (
            1 - frac_done) * reptile_args.meta_learning_rate_initial

        meta_batch = {
            k: omniglot_train_clients.train_data_local_dict[k]
            for k in cyclerange(
                i * reptile_args.meta_batch_size %
                len(omniglot_train_clients.train_data_local_dict), (i + 1) *
                reptile_args.meta_batch_size %
                len(omniglot_train_clients.train_data_local_dict),
                len(omniglot_train_clients.train_data_local_dict))
        }

        reptile.train_step(meta_batch=meta_batch,
                           meta_step_size=cur_meta_step_size)

        if i % eval_iters == 0:
            accuracies = []
            k = RANDOM.randrange(
                len(omniglot_train_clients.train_data_local_dict))
            train_train = omniglot_train_clients.train_data_local_dict[k]
            train_test = omniglot_train_clients.test_data_local_dict[k]
            k = RANDOM.randrange(
                len(omniglot_test_clients.train_data_local_dict))
            test_train = omniglot_test_clients.train_data_local_dict[k]
            test_test = omniglot_test_clients.test_data_local_dict[k]

            for train_dl, test_dl in [(train_train, train_test),
                                      (test_train, test_test)]:
                correct = reptile.evaluate(train_dl, test_dl)
                accuracies.append(correct / num_classes_per_client)
            print('batch %d: train=%f test=%f' %
                  (i, accuracies[0], accuracies[1]))

            # Write to TensorBoard
            experiment_logger.experiment.add_scalar(
                'train-test/acc/{}/mean'.format('global_model'),
                accuracies[0],
                global_step=i)
            experiment_logger.experiment.add_scalar(
                'test-test/acc/{}/mean'.format('global_model'),
                accuracies[1],
                global_step=i)
    """
Code Example #17
File: shenjingtest2.py  Project: mark-fyq/tensorflow
#shenjingtest3.py
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()  # disable eager execution and run in graph mode

# build two constant tensors

matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)  # matrix multiplication

##part1
# sess = tf.Session()
# result = sess.run(product)
# print(result)
# sess.close()  # close the session

#part2
with tf.Session() as sess:  # open a Session
    result = sess.run(product)
    print(result)
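For contrast, a minimal sketch of the same computation with eager execution left on (the TF 2.x default); it assumes only a plain TF 2.x install:

import tensorflow as tf  # TF 2.x, eager execution enabled by default

matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)  # runs immediately, no Session required
print(product.numpy())  # [[12]]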
Code Example #18
import resnet
import data as data_lib
import model as model_lib
import model_util as model_util

import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
import tensorflow_hub as hub

#boostx :
'''
TF 2.0 has an issue with model.save: https://github.com/tensorflow/tensorflow/issues/26814
use tf.keras.models.save_model() instead of model.save()
'''
'''
tf.disable_eager_execution()

with tf.Session() as sess:
  saver = tf.train.import_meta_graph('./tmp/simclr_test_ft/model.ckpt-196.meta')
  saver.restore(sess, tf.train.latest_checkpoint('./tmp/simclr_test_ft'))
  
  
graph = tf.get_default_graph()
print(graph.get_operations())
'''

FLAGS = flags.FLAGS

flags.DEFINE_float('learning_rate', 0.3,
                   'Initial learning rate per batch size of 256.')
Code Example #19
    def testPCgradBasic(self, denylist, allowlist, pcgrad_var_idx):
        tf.disable_eager_execution()
        for dtype in [tf.dtypes.float32, tf.dtypes.float64]:
            with self.session(graph=tf.Graph()):
                var0_np = np.array([1.0, 2.0], dtype=dtype.as_numpy_dtype)
                var1_np = np.array([3.0, 4.0], dtype=dtype.as_numpy_dtype)
                const0_np = np.array([1., 0.], dtype=dtype.as_numpy_dtype)
                const1_np = np.array([-1., -1.], dtype=dtype.as_numpy_dtype)
                const2_np = np.array([-1., 1.], dtype=dtype.as_numpy_dtype)

                var0 = tf.Variable(var0_np, dtype=dtype, name='first_var/var0')
                var1 = tf.Variable(var1_np,
                                   dtype=dtype,
                                   name='second_var/var1')
                const0 = tf.constant(const0_np)
                const1 = tf.constant(const1_np)
                const2 = tf.constant(const2_np)
                loss0 = tf.tensordot(var0, const0, 1) + tf.tensordot(
                    var1, const2, 1)
                loss1 = tf.tensordot(var0, const1, 1) + tf.tensordot(
                    var1, const0, 1)

                learning_rate = lambda: 0.001
                opt = tf.train.GradientDescentOptimizer(learning_rate)
                losses = loss0 + loss1
                opt_grads = opt.compute_gradients(losses,
                                                  var_list=[var0, var1])

                pcgrad_opt = pcgrad.PCGrad(
                    tf.train.GradientDescentOptimizer(learning_rate),
                    denylist=denylist,
                    allowlist=allowlist)
                pcgrad_col_opt = pcgrad.PCGrad(
                    tf.train.GradientDescentOptimizer(learning_rate),
                    use_collection_losses=True,
                    denylist=denylist,
                    allowlist=allowlist)
                losses = [loss0, loss1]
                pcgrad_grads = pcgrad_opt.compute_gradients(
                    losses, var_list=[var0, var1])
                tf.add_to_collection(pcgrad.PCGRAD_LOSSES_COLLECTION, loss0)
                tf.add_to_collection(pcgrad.PCGRAD_LOSSES_COLLECTION, loss1)
                pcgrad_grads_collection = pcgrad_col_opt.compute_gradients(
                    None, var_list=[var0, var1])

                with tf.Graph().as_default():
                    # Shouldn't return non-slot variables from other graphs.
                    self.assertEmpty(opt.variables())

                self.evaluate(tf.global_variables_initializer())
                grad_vec, pcgrad_vec, pcgrad_col_vec = self.evaluate(
                    [opt_grads, pcgrad_grads, pcgrad_grads_collection])
                # Make sure that both methods take grads of the same vars.
                self.assertAllCloseAccordingToType(pcgrad_vec, pcgrad_col_vec)

                results = [{
                    'var': var0,
                    'pcgrad_vec': [0.5, -1.5],
                    'result': [0.9995, 2.0015]
                }, {
                    'var': var1,
                    'pcgrad_vec': [0.5, 1.5],
                    'result': [2.9995, 3.9985]
                }]
                grad_var_idx = {0, 1}.difference(pcgrad_var_idx)

                self.assertAllCloseAccordingToType(grad_vec[0][0], [0.0, -1.0],
                                                   atol=1e-5)
                self.assertAllCloseAccordingToType(grad_vec[1][0], [0.0, 1.0],
                                                   atol=1e-5)
                pcgrad_vec_idx = 0
                for var_idx in pcgrad_var_idx:
                    self.assertAllCloseAccordingToType(
                        pcgrad_vec[pcgrad_vec_idx][0],
                        results[var_idx]['pcgrad_vec'],
                        atol=1e-5)
                    pcgrad_vec_idx += 1

                for var_idx in grad_var_idx:
                    self.assertAllCloseAccordingToType(
                        pcgrad_vec[pcgrad_vec_idx][0],
                        grad_vec[var_idx][0],
                        atol=1e-5)
                    pcgrad_vec_idx += 1

                self.evaluate(opt.apply_gradients(pcgrad_grads))
                self.assertAllCloseAccordingToType(
                    self.evaluate(
                        [results[idx]['var'] for idx in pcgrad_var_idx]),
                    [results[idx]['result'] for idx in pcgrad_var_idx])
Code Example #20
    def __init__(
        self,
        estimator: "SPEECH_RECOGNIZER_TYPE",
        masker: "PsychoacousticMasker",
        eps: float = 2000.0,
        learning_rate_1: float = 100.0,
        max_iter_1: int = 1000,
        alpha: float = 0.05,
        learning_rate_2: float = 1.0,
        max_iter_2: int = 4000,
        loss_theta_min: float = 0.05,
        decrease_factor_eps: float = 0.8,
        num_iter_decrease_eps: int = 10,
        increase_factor_alpha: float = 1.2,
        num_iter_increase_alpha: int = 20,
        decrease_factor_alpha: float = 0.8,
        num_iter_decrease_alpha: int = 50,
        batch_size: int = 1,
    ) -> None:
        """
        Create an instance of the :class:`.ImperceptibleASR`.

        The default parameters assume that audio input is in the `int16` range. If using normalized audio input, the
        parameters `eps` and `learning_rate_{1,2}` need to be scaled by a factor of `2^-15`.

        :param estimator: A trained speech recognition estimator.
        :param masker: A Psychoacoustic masker.
        :param eps: Initial max norm bound for adversarial perturbation.
        :param learning_rate_1: Learning rate for stage 1 of attack.
        :param max_iter_1: Number of iterations for stage 1 of attack.
        :param alpha: Initial alpha value for balancing stage 2 loss.
        :param learning_rate_2: Learning rate for stage 2 of attack.
        :param max_iter_2: Number of iterations for stage 2 of attack.
        :param loss_theta_min: If imperceptible loss reaches minimum, stop early. Works best with `batch_size=1`.
        :param decrease_factor_eps: Decrease factor for epsilon (Paper default: 0.8).
        :param num_iter_decrease_eps: Iterations after which to decrease epsilon if attack succeeds (Paper default: 10).
        :param increase_factor_alpha: Increase factor for alpha (Paper default: 1.2).
        :param num_iter_increase_alpha: Iterations after which to increase alpha if attack succeeds (Paper default: 20).
        :param decrease_factor_alpha: Decrease factor for alpha (Paper default: 0.8).
        :param num_iter_decrease_alpha: Iterations after which to decrease alpha if attack fails (Paper default: 50).
        :param batch_size: Batch size.
        """

        # Super initialization
        super().__init__(estimator=estimator)
        self.masker = masker
        self.eps = eps
        self.learning_rate_1 = learning_rate_1
        self.max_iter_1 = max_iter_1
        self.alpha = alpha
        self.learning_rate_2 = learning_rate_2
        self.max_iter_2 = max_iter_2
        self._targeted = True
        self.batch_size = batch_size
        self.loss_theta_min = loss_theta_min
        self.decrease_factor_eps = decrease_factor_eps
        self.num_iter_decrease_eps = num_iter_decrease_eps
        self.increase_factor_alpha = increase_factor_alpha
        self.num_iter_increase_alpha = num_iter_increase_alpha
        self.decrease_factor_alpha = decrease_factor_alpha
        self.num_iter_decrease_alpha = num_iter_decrease_alpha
        self._check_params()

        # init some aliases
        self._window_size = masker.window_size
        self._hop_size = masker.hop_size
        self._sample_rate = masker.sample_rate

        self._framework: Optional[str] = None

        if isinstance(self.estimator, TensorFlowV2Estimator):
            import tensorflow.compat.v1 as tf1

            # set framework attribute
            self._framework = "tensorflow"

            # disable eager execution and use the tensorflow.compat.v1 API, e.g. Lingvo uses the TF2 v1-compat API
            tf1.disable_eager_execution()

            # TensorFlow placeholders
            self._delta = tf1.placeholder(tf1.float32,
                                          shape=[None, None],
                                          name="art_delta")
            self._power_spectral_density_maximum_tf = tf1.placeholder(
                tf1.float32, shape=[None], name="art_psd_max")
            self._masking_threshold_tf = tf1.placeholder(
                tf1.float32,
                shape=[None, None, None],
                name="art_masking_threshold")
            # TensorFlow loss gradient ops
            self._loss_gradient_masking_threshold_op_tf = self._loss_gradient_masking_threshold_tf(
                self._delta, self._power_spectral_density_maximum_tf,
                self._masking_threshold_tf)

        elif isinstance(self.estimator, PyTorchEstimator):
            # set framework attribute
            self._framework = "pytorch"
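As the docstring above notes, the defaults assume `int16`-range audio; for audio normalized to [-1, 1], `eps` and `learning_rate_{1,2}` should be scaled by `2^-15`. A hypothetical instantiation sketch (`estimator` and `masker` are placeholders):

SCALE = 2 ** -15  # int16 range -> [-1, 1] normalization factor

attack = ImperceptibleASR(
    estimator=estimator,  # placeholder: a trained speech recognition estimator
    masker=masker,        # placeholder: a PsychoacousticMasker instance
    eps=2000.0 * SCALE,
    learning_rate_1=100.0 * SCALE,
    learning_rate_2=1.0 * SCALE,
)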
Code Example #21
 def test_sum(self):
     tf.disable_eager_execution()
     nodes = tf.constant([1, 3])
     nodes2 = tf.constant([1, 3])
     fused = efficientdet_arch.fuse_features([nodes, nodes2], 'sum')
     self.assertAllCloseAccordingToType(fused, [2, 6])
Code Example #22
def main(_):
  tf.disable_eager_execution()

  if not FLAGS.dataset_dir:
    raise ValueError('You must supply the dataset directory with --dataset_dir')

  tf.logging.set_verbosity(tf.logging.INFO)
  with tf.Graph().as_default():
    ######################
    # Select the dataset #
    ######################
    dataset = dataset_factory.get_dataset(
        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

    #########################
    # Configure the network #
    #########################
    inception_params = network_params.InceptionV3FCNParams(
        receptive_field_size=FLAGS.receptive_field_size,
        prelogit_dropout_keep_prob=0.8,
        depth_multiplier=0.1,
        min_depth=16,
        inception_fcn_stride=0,
    )
    conv_params = network_params.ConvScopeParams(
        dropout=False,
        dropout_keep_prob=0.8,
        batch_norm=True,
        batch_norm_decay=0.99,
        l2_weight_decay=4e-05,
    )
    network_fn = inception_v3_fcn.get_inception_v3_fcn_network_fn(
        inception_params,
        conv_params,
        num_classes=dataset.num_classes,
        is_training=True,
    )

    #####################################
    # Select the preprocessing function #
    #####################################
    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        'inception_v3', is_training=True)

    ##############################################################
    # Create a dataset provider that loads data from the dataset #
    ##############################################################
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        num_readers=DATASET_READERS,
        common_queue_capacity=20 * FLAGS.batch_size,
        common_queue_min=10 * FLAGS.batch_size)
    [image, label] = provider.get(['image', 'label'])
    train_image_size = FLAGS.receptive_field_size
    image = image_preprocessing_fn(image, train_image_size, train_image_size)
    images, labels = tf.train.batch([image, label],
                                    batch_size=FLAGS.batch_size,
                                    num_threads=PREPROCESSING_THREADS,
                                    capacity=5 * FLAGS.batch_size)
    labels = slim.one_hot_encoding(labels, dataset.num_classes)

    ####################
    # Define the model #
    ####################
    logits, _ = network_fn(images)

    slim.losses.softmax_cross_entropy(logits, labels)
    total_loss = slim.losses.get_total_loss()
    tf.summary.scalar('losses/Total_Loss', total_loss)

    optimizer = tf.train.RMSPropOptimizer(0.01)

    train_op = slim.learning.create_train_op(
        total_loss,
        optimizer,
        variables_to_train=_get_variables_to_train())

    ###########################
    # Kicks off the training. #
    ###########################
    slim.learning.train(
        train_op,
        logdir=FLAGS.train_dir,
        init_fn=_get_init_fn(),
        number_of_steps=FLAGS.max_number_of_steps,
        log_every_n_steps=FLAGS.log_every_n_steps,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs,
        session_config=tf.ConfigProto(allow_soft_placement=True))
Code Example #23
def validate_yolo_model_pb(model_path, image_file, anchors, class_names,
                           model_image_size, elim_grid_sense, v5_decode,
                           loop_count):
    # check the TF version for compatibility with TF 2.x
    global tf
    if tf.__version__.startswith('2'):
        import tensorflow.compat.v1 as tf
        tf.disable_eager_execution()

    # NOTE: a TF 1.x frozen pb graph needs its input/output tensor names
    # specified, so we hardcode them here to fetch the tensors from the model
    if len(anchors) == 6:
        output_tensor_names = [
            'graph/predict_conv_1/BiasAdd:0', 'graph/predict_conv_2/BiasAdd:0'
        ]
    elif len(anchors) == 9:
        output_tensor_names = [
            'graph/predict_conv_1/BiasAdd:0', 'graph/predict_conv_2/BiasAdd:0',
            'graph/predict_conv_3/BiasAdd:0'
        ]
    elif len(anchors) == 5:
        # YOLOv2 uses 5 anchors and has only 1 prediction layer
        output_tensor_names = ['graph/predict_conv/BiasAdd:0']
    else:
        raise ValueError('invalid anchor number')

    # assume only 1 input tensor for image
    input_tensor_name = 'graph/image_input:0'

    #load frozen pb graph
    def load_pb_graph(model_path):
        # We parse the graph_def file
        with tf.gfile.GFile(model_path, "rb") as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())

        # We load the graph_def in the default graph
        with tf.Graph().as_default() as graph:
            tf.import_graph_def(graph_def,
                                input_map=None,
                                return_elements=None,
                                name="graph",
                                op_dict=None,
                                producer_op_list=None)
        return graph

    graph = load_pb_graph(model_path)

    # We can list the graph operations; op.values() gives the tensors an op
    # produces and op.name gives its name. These ops also include the input &
    # output nodes. Printed output looks like:
    # prefix/Placeholder/inputs_placeholder
    # ...
    # prefix/Accuracy/predictions
    #
    # NOTE: prefix/Placeholder/inputs_placeholder is only the op's name;
    # the tensor name should be like prefix/Placeholder/inputs_placeholder:0

    #for op in graph.get_operations():
    #print(op.name, op.values())

    image_input = graph.get_tensor_by_name(input_tensor_name)
    output_tensors = [
        graph.get_tensor_by_name(output_tensor_name)
        for output_tensor_name in output_tensor_names
    ]

    batch, height, width, channel = image_input.shape
    model_image_size = (int(height), int(width))

    img = Image.open(image_file)
    image = np.array(img, dtype='uint8')
    image_data = preprocess_image(img, model_image_size)
    # original image shape, in (height, width) format
    image_shape = tuple(reversed(img.size))

    # predict once first to bypass the model building time
    with tf.Session(graph=graph) as sess:
        prediction = sess.run(output_tensors,
                              feed_dict={image_input: image_data})

    start = time.time()
    # reuse one Session so the timing measures inference only, not
    # per-iteration session construction
    with tf.Session(graph=graph) as sess:
        for i in range(loop_count):
            prediction = sess.run(output_tensors,
                                  feed_dict={image_input: image_data})
    end = time.time()
    print("Average Inference time: {:.8f}ms".format(
        (end - start) * 1000 / loop_count))

    prediction.sort(key=lambda x: len(x[0]))
    handle_prediction(prediction, image_file, image, image_shape, anchors,
                      class_names, model_image_size, elim_grid_sense,
                      v5_decode)
Code Example #24
def load_tf_weights_in_bert_generation(
    model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
):
    try:
        import numpy as np
        import tensorflow.compat.v1 as tf

        import tensorflow_hub as hub
        import tensorflow_text  # noqa: F401

        tf.disable_eager_execution()
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_model = hub.Module(tf_hub_path)
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        init.run()
        all_variables = tf_model.variable_map
        keep_track_variables = all_variables.copy()
        for key in list(all_variables.keys()):
            if "global" in key:
                logger.info(f"Skipping {key}...")
                continue
            if not is_encoder:
                model_pointer = getattr(model, model_class)
            else:
                model_pointer = model
            is_embedding = False
            logger.info(f"Trying to match {key}...")
            # remove start_string = "module/bert/"
            sub_layers = key.split("/")[2:]
            if is_encoder_named_decoder and sub_layers[0] == "encoder":
                logger.info(f"Skipping encoder layer {key} for decoder")
                continue
            if is_encoder and sub_layers[0] == "decoder":
                logger.info(f"Skipping decoder layer {key} for encoder")
                continue
            for i, sub_layer in enumerate(sub_layers):
                if sub_layer == "embeddings":
                    is_embedding = True
                elif sub_layer == "LayerNorm":
                    is_embedding = False
                if "layer" in sub_layer:
                    model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])]
                elif sub_layer in ["kernel", "gamma"]:
                    model_pointer = model_pointer.weight
                elif sub_layer == "beta":
                    model_pointer = model_pointer.bias
                elif sub_layer == "encdec":
                    model_pointer = model_pointer.crossattention.self
                elif sub_layer == "encdec_output":
                    model_pointer = model_pointer.crossattention.output
                elif is_encoder_named_decoder and sub_layer == "decoder":
                    model_pointer = model_pointer.encoder
                else:
                    if sub_layer == "attention" and "encdec" in sub_layers[i + 1]:
                        continue
                    try:
                        model_pointer = getattr(model_pointer, sub_layer)
                    except AttributeError:
                        logger.info(f"Skipping to initialize {key} at {sub_layer}...")
                        raise AttributeError

            array = np.asarray(sess.run(all_variables[key]))
            if not is_embedding:
                logger.info(f"Transposing numpy weight of shape {array.shape} for {key}")
                array = np.transpose(array)
            else:
                model_pointer = model_pointer.weight

            if model_pointer.shape != array.shape:
                raise ValueError(f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched")
            logger.info(f"Initialize PyTorch weight {key}")

            model_pointer.data = torch.from_numpy(array.astype(np.float32))
            keep_track_variables.pop(key, None)

        logger.info(f"Weights not copied to PyTorch model: {', '.join(keep_track_variables.keys())}")
        return model
Code Example #25
def setUpModule():
    tf.disable_eager_execution()
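A minimal sketch of a test that relies on this module-level setup, assuming `tensorflow.compat.v1 as tf` and a `tf.test.TestCase` base class:

class GraphModeTest(tf.test.TestCase):

    def test_placeholder_in_graph_mode(self):
        # tf.placeholder is only usable once eager execution is disabled.
        x = tf.placeholder(tf.float32, shape=[2])
        y = x * 2.0
        with self.session() as sess:
            self.assertAllClose(sess.run(y, feed_dict={x: [1.0, 2.0]}),
                                [2.0, 4.0])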
Code Example #26
File: Interaction.py  Project: Andriannaa/Review-daml
# encoding: utf-8
#import tensorflow as tf
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()  # disable eager execution so tf.placeholder can be used
import numpy as np
#from auxiliaryTools.ExtractData import Dataset
#from auxiliaryTools.GetTest import get_test_list
from ExtractData import Dataset
from GetTest import get_test_list
from time import time
import math, os


def get_train_instance(train):
    user_input, item_input, rates = [], [], []

    for (u, i) in train.keys():
        # positive instance
        user_input.append(u)
        item_input.append(i)
        rates.append(train[u, i])
    return user_input, item_input, rates
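A toy illustration of the flattening above, assuming `train` is a dict keyed by `(user, item)` pairs:

train = {(0, 10): 4.0, (1, 20): 5.0}
user_input, item_input, rates = get_train_instance(train)
# user_input == [0, 1], item_input == [10, 20], rates == [4.0, 5.0]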


def get_train_instance_batch_change(count, batch_size, user_input, item_input,
                                    ratings, user_reviews, item_reviews):
    users_batch, items_batch, user_input_batch, item_input_batch, labels_batch = [], [], [], [], []

    for idx in range(batch_size):
        index = (count * batch_size + idx) % len(user_input)
        users_batch.append(user_input[index])
Code Example #27
def load_tf_graph_def(graph_file_name: str = "",
                      is_binary: bool = True,
                      checkpoint: str = "",
                      model_dir: str = "",
                      saved_model_tags: list = [],
                      meta_graph_file: str = "",
                      user_output_node_names_list: list = []):
    # As a provisional solution, use native TF methods to load the model protobuf
    graph_def = tf_v1.GraphDef()
    if isinstance(graph_file_name, str) and (re.match(r'.*\.(ckpt|meta)$',
                                                      graph_file_name)):
        print(
            '[ WARNING ] The value for the --input_model command line parameter ends with ".ckpt" or ".meta" '
            'extension.\n'
            'It means that the model is not frozen.\n'
            'To load a non-frozen model into the Model Optimizer, run:'
            '\n\n1. For "*.ckpt" file:'
            '\n- if inference graph is in binary format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pb" --input_checkpoint "path/to/*.ckpt"'
            '\n- if inference graph is in text format'
            '\npython3 mo_tf.py --input_model "path/to/inference_graph.pbtxt" --input_model_is_text '
            '--input_checkpoint "path/to/*.ckpt"'
            '\n\n2. For "*.meta" file:'
            '\npython3 mo_tf.py --input_meta_graph "path/to/*.meta"')
    variables_values = {}
    try:
        if graph_file_name and not meta_graph_file and not checkpoint:
            # frozen graph
            return read_file_to_graph_def(graph_def, graph_file_name,
                                          is_binary), variables_values
        if graph_file_name and not meta_graph_file and checkpoint:
            # inference graph and checkpoint
            graph_def = read_file_to_graph_def(graph_def, graph_file_name,
                                               is_binary)
            outputs = get_output_node_names_list(graph_def,
                                                 user_output_node_names_list)
            if os.path.isfile(checkpoint):
                graph_def = freeze_checkpoint(graph_def=graph_def,
                                              checkpoint=checkpoint,
                                              output_node_names=outputs)
            elif os.path.isdir(checkpoint):
                graph_def, variables_values = freeze_checkpoints(
                    graph_def=graph_def,
                    checkpoint_dir=checkpoint,
                    output_node_names=outputs)
            # the checkpoint is guaranteed to be an existing file or directory by the cli_parser configuration
            return graph_def, variables_values
        if not graph_file_name and meta_graph_file:
            meta_graph_file = deducing_metagraph_path(meta_graph_file)
            input_meta_graph_def = read_file_to_graph_def(
                tf_v1.MetaGraphDef(), meta_graph_file, is_binary)
            # pylint: disable=no-member
            with tf_v1.Session() as sess:
                restorer = tf_v1.train.import_meta_graph(input_meta_graph_def)
                restorer.restore(sess, re.sub(r'\.meta$', '', meta_graph_file))
                outputs = get_output_node_names_list(
                    input_meta_graph_def.graph_def,
                    user_output_node_names_list)
                graph_def = tf_v1.graph_util.convert_variables_to_constants(
                    sess, input_meta_graph_def.graph_def, outputs)
                return graph_def, variables_values
        if model_dir:
            # saved model directory
            try:
                env_setup = get_environment_setup()
                # temporarily enable eager execution while the TensorFlow 2 model is being loaded
                tf_v1.enable_eager_execution()
                # code to extract GraphDef for TF 2.0 SavedModel format
                # tf.saved_model.load function throws TypeError for TF 1.x SavedModel format in case TF 1.x installed
                imported = tf.saved_model.load(model_dir, saved_model_tags)  # pylint: disable=E1120
                # to get a signature by key throws KeyError for TF 1.x SavedModel format in case TF 2.x installed
                concrete_func = imported.signatures[
                    tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
                # the aggressive_inlining parameter is needed to freeze the embeddings table for the Keras Embedding
                # operation; a model with an Embedding operation cannot be properly converted to IR without it
                if "tensorflow" in env_setup and env_setup[
                        "tensorflow"] >= LooseVersion("2.2.0"):
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func,
                        lower_control_flow=False,
                        aggressive_inlining=True)  # pylint: disable=E1123
                else:
                    frozen_func = convert_variables_to_constants_v2(
                        concrete_func, lower_control_flow=False)  # pylint: disable=E1123
                graph_def = frozen_func.graph.as_graph_def(add_shapes=True)
                # disable eager execution since next steps are executed with a graph in non-eager mode
                tf_v1.disable_eager_execution()
                return graph_def, variables_values
            except (TypeError, KeyError):
                # disable eager execution since TensorFlow 1 model is handled
                tf_v1.disable_eager_execution()
                # code to extract GraphDef for TF 1.0 SavedModel format
                tags = saved_model_tags if saved_model_tags is not None else [
                    tf_v1.saved_model.tag_constants.SERVING
                ]
                with tf_v1.Session() as sess:
                    meta_graph_def = tf_v1.saved_model.loader.load(
                        sess, tags, model_dir)
                    outputs = get_output_node_names_list(
                        meta_graph_def.graph_def, user_output_node_names_list)
                    graph_def = tf_v1.graph_util.convert_variables_to_constants(
                        sess, meta_graph_def.graph_def, outputs)
                    return graph_def, variables_values
            except Exception as e:
                raise FrameworkError('SavedModel format load failure: {}',
                                     e) from e
    except Exception as e:
        raise FrameworkError('Cannot load input model: {}', e) from e
    raise Error("Unknown configuration of input model parameters")
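A hypothetical call sketch for the simplest case handled above, a frozen binary graph (the path is a placeholder):

graph_def, variables_values = load_tf_graph_def(
    graph_file_name="path/to/frozen_inference_graph.pb", is_binary=True)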
Code Example #28
def main(_):
  if FLAGS.strategy == 'tpu':
    tf.disable_eager_execution()
    tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
        FLAGS.tpu, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
    tpu_grpc_url = tpu_cluster_resolver.get_master()
    tf.Session.reset(tpu_grpc_url)
  else:
    tpu_cluster_resolver = None

  # Check data path
  if FLAGS.mode in ('train', 'train_and_eval'):
    if FLAGS.training_file_pattern is None:
      raise RuntimeError('Must specify --training_file_pattern for train.')
  if FLAGS.mode in ('eval', 'train_and_eval'):
    if FLAGS.validation_file_pattern is None:
      raise RuntimeError('Must specify --validation_file_pattern for eval.')

  # Parse and override hparams
  config = hparams_config.get_detection_config(FLAGS.model_name)
  config.override(FLAGS.hparams)
  if FLAGS.num_epochs:  # NOTE: remove this flag after updating all docs.
    config.num_epochs = FLAGS.num_epochs

  # Parse image size in case it is in string format.
  config.image_size = utils.parse_image_size(config.image_size)

  # The following is for spatial partitioning. `features` has one tensor while
  # `labels` has 4 + (`max_level` - `min_level` + 1) * 2 tensors. The input
  # partition is performed on `features` and all partitionable tensors of
  # `labels`; see the partition logic below.
  # In the TPUEstimator context, `shard` and `replica` mean the same thing;
  # following the API, both terms are used here.
  if FLAGS.use_spatial_partition:
    # Checks input_partition_dims agrees with num_cores_per_replica.
    if FLAGS.num_cores_per_replica != np.prod(FLAGS.input_partition_dims):
      raise RuntimeError('--num_cores_per_replica must be a product of array '
                         'elements in --input_partition_dims.')

    labels_partition_dims = {
        'mean_num_positives': None,
        'source_ids': None,
        'groundtruth_data': None,
        'image_scales': None,
        'image_masks': None,
    }
    # The Input Partition Logic: We partition only the partition-able tensors.
    feat_sizes = utils.get_feat_sizes(
        config.get('image_size'), config.get('max_level'))
    for level in range(config.get('min_level'), config.get('max_level') + 1):

      def _can_partition(spatial_dim):
        partitionable_index = np.where(
            spatial_dim % np.array(FLAGS.input_partition_dims) == 0)
        return len(partitionable_index[0]) == len(FLAGS.input_partition_dims)

      spatial_dim = feat_sizes[level]
      if _can_partition(spatial_dim['height']) and _can_partition(
          spatial_dim['width']):
        labels_partition_dims['box_targets_%d' %
                              level] = FLAGS.input_partition_dims
        labels_partition_dims['cls_targets_%d' %
                              level] = FLAGS.input_partition_dims
      else:
        labels_partition_dims['box_targets_%d' % level] = None
        labels_partition_dims['cls_targets_%d' % level] = None
    num_cores_per_replica = FLAGS.num_cores_per_replica
    input_partition_dims = [FLAGS.input_partition_dims, labels_partition_dims]
    num_shards = FLAGS.num_cores // num_cores_per_replica
  else:
    num_cores_per_replica = None
    input_partition_dims = None
    num_shards = FLAGS.num_cores

  params = dict(
      config.as_dict(),
      model_name=FLAGS.model_name,
      iterations_per_loop=FLAGS.iterations_per_loop,
      model_dir=FLAGS.model_dir,
      num_shards=num_shards,
      num_examples_per_epoch=FLAGS.num_examples_per_epoch,
      strategy=FLAGS.strategy,
      backbone_ckpt=FLAGS.backbone_ckpt,
      ckpt=FLAGS.ckpt,
      val_json_file=FLAGS.val_json_file,
      testdev_dir=FLAGS.testdev_dir,
      profile=FLAGS.profile,
      mode=FLAGS.mode)
  config_proto = tf.ConfigProto(
      allow_soft_placement=True, log_device_placement=False)
  if FLAGS.strategy != 'tpu':
    if FLAGS.use_xla:
      config_proto.graph_options.optimizer_options.global_jit_level = (
          tf.OptimizerOptions.ON_1)
      config_proto.gpu_options.allow_growth = True

  model_dir = FLAGS.model_dir
  model_fn_instance = det_model_fn.get_model_fn(FLAGS.model_name)
  max_instances_per_image = config.max_instances_per_image
  if FLAGS.eval_samples:
    eval_steps = int((FLAGS.eval_samples + FLAGS.eval_batch_size - 1) //
                     FLAGS.eval_batch_size)
  else:
    eval_steps = None
  total_examples = int(config.num_epochs * FLAGS.num_examples_per_epoch)
  train_steps = total_examples // FLAGS.train_batch_size
  logging.info(params)

  if not tf.io.gfile.exists(model_dir):
    tf.io.gfile.makedirs(model_dir)

  config_file = os.path.join(model_dir, 'config.yaml')
  if not tf.io.gfile.exists(config_file):
    tf.io.gfile.GFile(config_file, 'w').write(str(config))

  train_input_fn = dataloader.InputReader(
      FLAGS.training_file_pattern,
      is_training=True,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)
  eval_input_fn = dataloader.InputReader(
      FLAGS.validation_file_pattern,
      is_training=False,
      use_fake_data=FLAGS.use_fake_data,
      max_instances_per_image=max_instances_per_image)

  if FLAGS.strategy == 'tpu':
    tpu_config = tf.estimator.tpu.TPUConfig(
        FLAGS.iterations_per_loop,
        num_cores_per_replica=num_cores_per_replica,
        input_partition_dims=input_partition_dims,
        per_host_input_for_training=tf.estimator.tpu.InputPipelineConfig
        .PER_HOST_V2)
    run_config = tf.estimator.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        model_dir=model_dir,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        tpu_config=tpu_config,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )
    # TPUEstimator can do both train and eval.
    train_est = tf.estimator.tpu.TPUEstimator(
        model_fn=model_fn_instance,
        train_batch_size=FLAGS.train_batch_size,
        eval_batch_size=FLAGS.eval_batch_size,
        config=run_config,
        params=params)
    eval_est = train_est
  else:
    strategy = None
    if FLAGS.strategy == 'gpus':
      strategy = tf.distribute.MirroredStrategy()
    run_config = tf.estimator.RunConfig(
        model_dir=model_dir,
        train_distribute=strategy,
        log_step_count_steps=FLAGS.iterations_per_loop,
        session_config=config_proto,
        save_checkpoints_steps=FLAGS.save_checkpoints_steps,
        tf_random_seed=FLAGS.tf_random_seed,
    )

    def get_estimator(global_batch_size):
      params['num_shards'] = getattr(strategy, 'num_replicas_in_sync', 1)
      params['batch_size'] = global_batch_size // params['num_shards']
      return tf.estimator.Estimator(
          model_fn=model_fn_instance, config=run_config, params=params)

    # train and eval need different estimators due to different batch sizes.
    train_est = get_estimator(FLAGS.train_batch_size)
    eval_est = get_estimator(FLAGS.eval_batch_size)

  # start train/eval flow.
  if FLAGS.mode == 'train':
    train_est.train(input_fn=train_input_fn, max_steps=train_steps)
    if FLAGS.eval_after_training:
      eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)

  elif FLAGS.mode == 'eval':
    # Run evaluation when there's a new checkpoint
    for ckpt in tf.train.checkpoints_iterator(
        FLAGS.model_dir,
        min_interval_secs=FLAGS.min_eval_interval,
        timeout=FLAGS.eval_timeout):

      logging.info('Starting to evaluate.')
      try:
        eval_results = eval_est.evaluate(eval_input_fn, steps=eval_steps)
        # Terminate eval job when final checkpoint is reached.
        try:
          current_step = int(os.path.basename(ckpt).split('-')[1])
        except IndexError:
          logging.info('%s has no global step info: stop!', ckpt)
          break

        utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)
        if current_step >= train_steps:
          logging.info('Eval finished step %d/%d', current_step, train_steps)
          break

      except tf.errors.NotFoundError:
        # The checkpoint might have been deleted by the time eval finished.
        # We simply skip such cases.
        logging.info('Checkpoint %s no longer exists, skipping.', ckpt)

  elif FLAGS.mode == 'train_and_eval':
    ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
    try:
      step = int(os.path.basename(ckpt).split('-')[1])
      current_epoch = (
          step * FLAGS.train_batch_size // FLAGS.num_examples_per_epoch)
      logging.info('found ckpt at step %d (epoch %d)', step, current_epoch)
    except (IndexError, TypeError):
      logging.info('Folder %s has no ckpt with valid step.', FLAGS.model_dir)
      current_epoch = 0

    def run_train_and_eval(e):
      print('\n   =====> Starting training, epoch: %d.' % e)
      train_est.train(
          input_fn=train_input_fn,
          max_steps=e * FLAGS.num_examples_per_epoch // FLAGS.train_batch_size)
      print('\n   =====> Starting evaluation, epoch: %d.' % e)
      eval_results = eval_est.evaluate(input_fn=eval_input_fn, steps=eval_steps)
      ckpt = tf.train.latest_checkpoint(FLAGS.model_dir)
      utils.archive_ckpt(eval_results, eval_results['AP'], ckpt)

    epochs_per_cycle = 1  # a higher number incurs less graph-construction overhead.
    for e in range(current_epoch + 1, config.num_epochs + 1, epochs_per_cycle):
      if FLAGS.run_epoch_in_child_process:
        p = multiprocessing.Process(target=run_train_and_eval, args=(e,))
        p.start()
        p.join()
        if p.exitcode != 0:
          return p.exitcode
      else:
        tf.compat.v1.reset_default_graph()
        run_train_and_eval(e)

  else:
    logging.info('Invalid mode: %s', FLAGS.mode)
Code Example #29
File: model_inspect.py  Project: 00LT00/oims
                               tensorrt=FLAGS.tensorrt,
                               use_xla=FLAGS.use_xla,
                               ckpt_path=FLAGS.ckpt_path,
                               export_ckpt=FLAGS.export_ckpt,
                               saved_model_dir=FLAGS.saved_model_dir,
                               tflite_path=FLAGS.tflite_path,
                               batch_size=FLAGS.batch_size,
                               hparams=FLAGS.hparams,
                               score_thresh=FLAGS.min_score_thresh,
                               max_output_size=FLAGS.max_boxes_to_draw,
                               nms_method=FLAGS.nms_method)
    inspector.run_model(FLAGS.runmode,
                        input_image=FLAGS.input_image,
                        output_image_dir=FLAGS.output_image_dir,
                        input_video=FLAGS.input_video,
                        output_video=FLAGS.output_video,
                        line_thickness=FLAGS.line_thickness,
                        max_boxes_to_draw=FLAGS.max_boxes_to_draw,
                        min_score_thresh=FLAGS.min_score_thresh,
                        nms_method=FLAGS.nms_method,
                        bm_runs=FLAGS.bm_runs,
                        threads=FLAGS.threads,
                        trace_filename=FLAGS.trace_filename)


if __name__ == '__main__':
    logging.set_verbosity(logging.WARNING)
    tf.enable_v2_tensorshape()
    tf.disable_eager_execution()
    app.run(main)
Code Example #30
def main(_):
  logging.info("Seed: %d", FLAGS.seed)
  np.random.seed(FLAGS.seed)
  torch.random.manual_seed(FLAGS.seed)

  if FLAGS.save_freq % FLAGS.small_eval_freq != 0:
    raise ValueError(
        ("Save frequency ({}) must be a multiple of evaluation frequency ({})."
         " Allows choosing checkpoints based on their evaluation scores.")
        .format(FLAGS.save_freq, FLAGS.small_eval_freq))

  if FLAGS.full_eval_freq % FLAGS.small_eval_freq != 0:
    raise ValueError(
        ("Full evaluation frequency ({}) must be a multiple of small"
         " evaluation frequency ({}) so that their values can be compared.")
        .format(FLAGS.full_eval_freq, FLAGS.small_eval_freq))

  exp_dir = os.path.join(FLAGS.experiment_base_dir, FLAGS.experiment_name)
  common_utils.create_experiment_directory(exp_dir, FLAGS.force_overwrite)
  tensorboard_dir = os.path.join(exp_dir, "tensorboard")
  tf.disable_eager_execution()
  tb_writer = tf.summary.FileWriter(tensorboard_dir)

  predictions_dir = os.path.join(exp_dir, "predictions")
  os.makedirs(predictions_dir, exist_ok=True)

  checkpoints_dir = os.path.join(exp_dir, "checkpoints")
  os.makedirs(checkpoints_dir, exist_ok=True)

  evict_trace_dir = os.path.join(exp_dir, "evictions")
  os.makedirs(evict_trace_dir, exist_ok=True)

  model_config = cfg.Config.from_files_and_bindings(
      FLAGS.model_configs, FLAGS.model_bindings)
  logging.info("Model config: %s", model_config)
  with open(os.path.join(exp_dir, "model_config.json"), "w") as f:
    model_config.to_file(f)

  cache_config = cfg.Config.from_files_and_bindings(
      FLAGS.cache_configs, FLAGS.cache_bindings)
  logging.info("Cache config: %s", cache_config)
  with open(os.path.join(exp_dir, "cache_config.json"), "w") as f:
    cache_config.to_file(f)

  dagger_schedule_config = cfg.Config.from_files_and_bindings(
      FLAGS.dagger_schedule_configs, FLAGS.dagger_schedule_bindings)
  logging.info("DAgger config: %s", dagger_schedule_config)
  with open(os.path.join(exp_dir, "dagger_config.json"), "w") as f:
    dagger_schedule_config.to_file(f)
  dagger_schedule = schedule_from_config(dagger_schedule_config)

  # Process everything on GPU if available
  device = torch.device("cpu")
  if torch.cuda.is_available():
    torch.set_default_tensor_type(torch.cuda.FloatTensor)
    device = torch.device("cuda:0")
  logging.info("Device: %s", device)

  policy_model = model.EvictionPolicyModel.from_config(model_config).to(device)
  optimizer = optim.Adam(policy_model.parameters(), lr=model_config.get("lr"))

  step = 0
  get_step = lambda: step
  oracle_valid_data, hit_rates = next(measure_cache_hit_rate(
      FLAGS.valid_memtrace, cache_config, policy_model,
      schedules.ConstantSchedule(0), get_step,
      os.path.join(evict_trace_dir, "oracle_valid.txt")))
  log_hit_rates(tb_writer, "cache_hit_rate/oracle_valid", hit_rates, step)

  with tqdm.tqdm(total=FLAGS.total_steps) as pbar:
    while True:  # loop until step reaches FLAGS.total_steps
      # Optimization: Instead of passing through the whole memory trace for
      # training and only using update_freq many of them, we lazily gather k *
      # update_freq batches and still train on a subsample of update_freq.
      # The value of k=collection_multiplier trades off between:
      #   - The set of k * update_freq examples are all consecutive in the
      #   memory trace. As k gets small, the set of these examples becomes less
      #   i.i.d., as they are temporally correlated. The examples cannot be
      #   random access within the memory trace, since at time t, we require the
      #   previous cache accesses to compute the cache state at time t.
      #   - As k gets large, training becomes slower, as we must perform k
      #   times as much collection work as training work.
      max_examples = (dagger_schedule_config.get("update_freq") *
                      FLAGS.collection_multiplier * FLAGS.batch_size)
      train_data_generator = measure_cache_hit_rate(
          FLAGS.train_memtrace, cache_config, policy_model, dagger_schedule,
          get_step, os.path.join(evict_trace_dir, "mixture-train-{}.txt"),
          max_examples=max_examples)
      for train_data, hit_rates in train_data_generator:
        log_hit_rates(
            tb_writer, "cache_hit_rate/train_mixture_policy", hit_rates, step)
        utils.log_scalar(
            tb_writer, "cache_hit_rate/mixture_parameter",
            dagger_schedule.value(step), step)

        for batch_num, batch in enumerate(utils.as_batches(
            [train_data], FLAGS.batch_size,
            model_config.get("sequence_length"))):
          def evaluate_helper(eval_size, suffix):
            """Evaluates the model on train / valid data on and off-policy.

            Args:
              eval_size (int): the number of examples to evaluate on.
              suffix (str): appended to all logging and tensorboard paths.
            """
            evaluate(policy_model, oracle_valid_data[-eval_size:], step,
                     "off_policy_valid" + suffix, tb_writer, predictions_dir)
            # train_data is defined in the loop, but evaluate_helper is only
            # called in the same loop iteration.
            # pylint: disable=cell-var-from-loop
            evaluate(policy_model, train_data[-eval_size:],
                     step, "train" + suffix, tb_writer, predictions_dir)
            # pylint: enable=cell-var-from-loop

            # Log the cache hit rates on portions of train / valid
            _, hit_rates = next(measure_cache_hit_rate(
                FLAGS.train_memtrace, cache_config, policy_model,
                schedules.ConstantSchedule(1), get_step,
                os.path.join(
                    evict_trace_dir, "train{}-{}.txt".format(suffix, step)),
                max_examples=eval_size, use_oracle_scores=False))
            log_hit_rates(
                tb_writer, "cache_hit_rate/train" + suffix, hit_rates, step)

            # Use oracle scores here, since the eviction trace in
            # log_evaluate_stats will be logged with on-policy scores.
            on_policy_valid_data, hit_rates = next(measure_cache_hit_rate(
                FLAGS.valid_memtrace, cache_config, policy_model,
                schedules.ConstantSchedule(1), get_step,
                os.path.join(
                    evict_trace_dir, "valid{}-{}.txt".format(suffix, step)),
                max_examples=eval_size))
            log_hit_rates(
                tb_writer, "cache_hit_rate/valid" + suffix, hit_rates, step)
            evaluate(policy_model, on_policy_valid_data[-eval_size:], step,
                     "on_policy_valid" + suffix, tb_writer, predictions_dir)

          if step % FLAGS.small_eval_freq == 0:
            evaluate_helper(FLAGS.small_eval_size, "")

          if step % FLAGS.full_eval_freq == 0:
            evaluate_helper(len(oracle_valid_data), "_full")

          if step % FLAGS.save_freq == 0 and step != 0:
            save_path = os.path.join(checkpoints_dir, "{}.ckpt".format(step))
            with open(save_path, "wb") as save_file:
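              # Serialize the state dict into an in-memory buffer first, then
              # write the whole buffer to disk in a single call.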
              checkpoint_buffer = io.BytesIO()
              torch.save(policy_model.state_dict(), checkpoint_buffer)
              logging.info("Saving model checkpoint to: %s", save_path)
              save_file.write(checkpoint_buffer.getvalue())

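          # Standard training step: zero the gradients, sum the per-component
          # losses returned by the model, backpropagate, and update.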
          optimizer.zero_grad()
          losses = policy_model.loss(
              batch, model_config.get("sequence_length") // 2)
          total_loss = sum(losses.values())
          total_loss.backward()
          optimizer.step()
          pbar.update(1)
          step += 1

          if step % FLAGS.tb_freq == 0:
            utils.log_scalar(tb_writer, "loss/total", total_loss, step)
            for loss_name, loss_value in losses.items():
              utils.log_scalar(
                  tb_writer, "loss/{}".format(loss_name), loss_value, step)

          if step == FLAGS.total_steps:
            return

          # Break out of the inner loop to collect the next set of
          # k * update_freq batches.
          if batch_num == dagger_schedule_config.get("update_freq"):
            break