Example #1
def main(_argv):
  """Main functions. Runs all anaylses."""
  # pylint: disable=W0212
  tfprof_logger._merge_default_with_oplog = merge_default_with_oplog

  FLAGS.model_dir = os.path.abspath(os.path.expanduser(FLAGS.model_dir))
  output_dir = os.path.join(FLAGS.model_dir, "profile")
  gfile.MakeDirs(output_dir)

  run_meta, graph, op_log = load_metadata(FLAGS.model_dir)

  param_arguments = [
      param_analysis_options(output_dir),
      micro_anaylsis_options(output_dir),
      flops_analysis_options(output_dir),
      device_analysis_options(output_dir),
  ]

  for tfprof_cmd, params in param_arguments:
    model_analyzer.print_model_analysis(
        graph=graph,
        run_meta=run_meta,
        op_log=op_log,
        tfprof_cmd=tfprof_cmd,
        tfprof_options=params)

    if params["dump_to_file"] != "":
      print("Wrote {}".format(params["dump_to_file"]))
Example #2
  def test_model_size_less_then1_gb(self):
    # NOTE: The actual amount of memory occupied by TF during training will be
    # at least 4x larger because of the space needed to store the original
    # weights, updates, gradients and variances. It also depends on the
    # optimizer used.
    ocr_model = self.create_model()
    ocr_model.create_base(images=self.fake_images, labels_one_hot=None)
    with self.test_session() as sess:
      tfprof_root = model_analyzer.print_model_analysis(
          sess.graph,
          tfprof_options=model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)

      model_size_bytes = 4 * tfprof_root.total_parameters
      self.assertLess(model_size_bytes, 1 * 2**30)
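As a rough worked example of the bound being tested: a model with 200 million trainable float32 parameters gives model_size_bytes = 4 * 200e6 = 800 MB, which stays under the 1 * 2**30 byte (1 GiB) limit asserted above; per the NOTE, the memory actually used during training would be at least 4x that.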
Example #3
def calculate_graph_metrics():
    param_stats = model_analyzer.print_model_analysis(
        tf.get_default_graph(),
        tfprof_options=model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
    return param_stats.total_parameters
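A hedged usage sketch of calculate_graph_metrics (TF 1.x with tf.contrib.tfprof; the toy graph below is illustrative and not from the original source):

import tensorflow as tf
model_analyzer = tf.contrib.tfprof.model_analyzer  # same module used in the snippets above

x = tf.placeholder(tf.float32, [None, 784])
w = tf.get_variable("w", shape=[784, 10])  # 784 * 10 = 7,840 parameters
b = tf.get_variable("b", shape=[10])       # 10 parameters
logits = tf.matmul(x, w) + b

# Prints the tfprof report for the default graph and returns 7850.
print(calculate_graph_metrics())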
Example #4
    def cnn_model(features, labels, mode):

        feat_size = 150
        batch_size = 100

        input_layer = tf.reshape(features,
                                 [-1, feat_size, feat_size, feat_size, 1])
        labels = tf.reshape(labels, [-1, 1])
        print("shape cnn")
        print(labels.shape)
        print(input_layer.shape)

        # inputs = [ batch_size , 150 , 150 , 150 , 1 ]
        conv1 = tf.layers.conv3d(inputs=input_layer,
                                 filters=4,
                                 kernel_size=[5, 5, 5],
                                 padding="same",
                                 activation=tf.nn.sigmoid)

        pool1 = tf.layers.max_pooling3d(inputs=conv1,
                                        pool_size=[10, 10, 10],
                                        strides=10)
        print("shape layer1")
        print(conv1.shape)
        print(pool1.shape)
        # pool1 = [batch_size , 30, 30, 30 , 32 ]
        """
        conv2 = tf.layers.conv3d(
            inputs = pool1 ,
            filters = 16 ,
            kernel_size = [ 5 , 5, 5 ] ,
            padding = "same" ,
            activation = tf.nn.sigmoid 
        )
        #conv2 = [ batch_size , 30, 30 , 30 , 64]

        pool2 = tf.layers.max_pooling3d(
            inputs = conv2 ,
            pool_size = [ 5 , 5 , 5 ] , 
            strides = 5 
        )
       
        
        # pool = [ batch_size , 6 , 6 , 6 , 64]
        #final layer size = 5*5*5*128
        #size = 15*15*15*2  # change 5 to 2
        print("shapes second layer")
        print( conv2.shape )
        print(pool2.shape)

         """
        #size = 15*15*15*8
        size = 15 * 15 * 15 * 4
        pool_flat = tf.reshape(pool1, [-1, size])
        # 43 MB
        print("shape pool_flat")
        print(pool_flat.shape)

        dropout = tf.layers.dropout(
            inputs=pool_flat,
            rate=0.4,
            training=mode == model_fn_lib.ModeKeys.TRAIN)

        result = tf.layers.dense(inputs=dropout,
                                 units=1,
                                 activation=tf.nn.sigmoid)
        # activation = tf.nn.relu
        """
        result = tf.layers.dense(
            inputs = dense1, 
            units  = 1 ,
            activation =  tf.nn.relu 
        )
        """
        #
        #
        print("shape result")
        print(result.shape)
        loss = None

        train_op = None

        param_stats = model_analyzer.print_model_analysis(
            tf.get_default_graph(),
            tfprof_options=tf.contrib.tfprof.model_analyzer.
            TRAINABLE_VARS_PARAMS_STAT_OPTIONS)

        sys.stdout.write('total_params: %d\n' % param_stats.total_parameters)

        tf.contrib.tfprof.model_analyzer.print_model_analysis(
            tf.get_default_graph(),
            tfprof_options=tf.contrib.tfprof.model_analyzer.FLOAT_OPS_OPTIONS)

        print("STAAAAAP")
        # loss is computed for TRAIN and EVAL modes, skipped for INFER
        if mode != model_fn_lib.ModeKeys.INFER:

            #onehot_labels = tf.one_hot( indices = tf.cast(labels, tf.int32 ) , depth = 1    )
            loss = tf.losses.log_loss(predictions=result, labels=labels)
            print(loss.shape)

        if mode == model_fn_lib.ModeKeys.TRAIN:
            print("TRAININIGNIGNI")
            print(input_layer.shape)
            """
            train_op = tf.contrib.layers.optimize_loss(
                loss = loss,
                global_step = tf.contrib.framework.get_global_step(),
                learning_rate = learning_rate ,
                optimizer = "Adam"
                
            )
            """
            global_step = tf.contrib.framework.get_global_step()
            #optimizer = tf.train.GradientDescentOptimizer( learning_rate )

            optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=0.0001)
            train_op = optimizer.minimize(loss, global_step=global_step)

            tf.summary.scalar("log_loss", loss)

        else:
            train_op = None

            print("Training not build ")

        predictions = {
            "probabilities": tf.nn.softmax(result, name="softmax_tensor"),
            "loss": loss
        }
        return predictions, loss, train_op
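The (predictions, loss, train_op) tuple returned above follows the legacy tf.contrib.learn model_fn contract, so a function like this can back an Estimator directly. A hedged sketch, assuming cnn_model is accessible in the enclosing scope; "my_input_fn" and the model_dir path are illustrative placeholders:

    # Legacy contrib.learn wiring (TF 1.x); input_fn and model_dir are placeholders.
    estimator = tf.contrib.learn.Estimator(model_fn=cnn_model,
                                           model_dir="/tmp/cnn3d_model")
    estimator.fit(input_fn=my_input_fn, steps=1000)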
Example #5
def calculate_graph_metrics():
    param_state = model_analyzer.print_model_analysis(
        tf.get_default_graph(),
        tfprof_options=model_analyzer.TRAINABLE_VARS_PARAMS_STAT_OPTIONS)
    return param_state.total_parameters