Example #1
def load(sess, checkpoint_dir):
    import re
    print(" [*] Reading checkpoints...")
    # checkpoint_dir = os.path.join(checkpoint_dir, self.model_dir, self.model_name)
    # saver = self.saver

    ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
    # import the inspect_checkpoint library
    from tensorflow.python.tools import inspect_checkpoint as chkp

    if ckpt and ckpt.model_checkpoint_path:
        ckpt_name = os.path.basename(ckpt.model_checkpoint_path)

        # saver.restore(sess, os.path.join(checkpoint_dir, ckpt_name))
        # print all tensors in checkpoint file
        chkp.print_tensors_in_checkpoint_file(os.path.join(checkpoint_dir, ckpt_name),
                                              tensor_name='',
                                              all_tensors=False,
                                              all_tensor_names=True)
        # chkp._count_total_params

        # if not ckpt_name.find('best') == -1:
        #     counter = 0
        # else:
        #     counter = int(next(re.finditer("(\d+)(?!.*\d)", ckpt_name)).group(0))

        # print(" [*] Success to read {}".format(ckpt_name))
        # return True, counter
    else:
        print(" [*] Failed to find a checkpoint")
Example #2
def freeze_graph():
    """
  Freeze tensorflow graph
  Inputs: None
  Ouutputs: None
  """
    checkpoint = "model_to_freeze.ckpt"

    saver = tf.train.import_meta_graph('model_to_freeze.ckpt.meta',
                                       clear_devices=True)
    graph = tf.get_default_graph()
    input_graph_def = graph.as_graph_def()
    print "Variables stored in checkpoint:"
    print_tensors_in_checkpoint_file(file_name=checkpoint,
                                     tensor_name='',
                                     all_tensors='')
    sess = tf.Session()
    saver.restore(sess, checkpoint)

    output_node_names = "densenet121/masked_logits,densenet121/probability"
    output_graph_def = graph_util.convert_variables_to_constants(
        sess,  # The session
        input_graph_def,  # input_graph_def is useful for retrieving the nodes 
        output_node_names.split(","))

    output_graph = "frozen_graph.pb"
    with tf.gfile.GFile(output_graph, "wb") as f:
        f.write(output_graph_def.SerializeToString())

    sess.close()
    os.remove('model_to_freeze.ckpt.meta')
    os.remove('model_to_freeze.ckpt.index')
    os.remove('model_to_freeze.ckpt.data-00000-of-00001')
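For completeness, a hedged sketch of loading the frozen graph back for inference; the tensor name follows output_node_names above and is an assumption for any other model:

def load_frozen_graph(pb_path="frozen_graph.pb"):
    # parse the serialized GraphDef and import it into a fresh graph
    with tf.gfile.GFile(pb_path, "rb") as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    graph = tf.Graph()
    with graph.as_default():
        tf.import_graph_def(graph_def, name="")
    # node name taken from freeze_graph() above; hypothetical elsewhere
    probs = graph.get_tensor_by_name("densenet121/probability:0")
    return graph, probs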
Example #3
 def print_tensor_names(self):
     ckpt = os.path.join(self.checkpoint_dir, 'model.ckpt') + '-' + repr(
         self.restore_epoch)
     print_tensors_in_checkpoint_file(file_name=ckpt,
                                      tensor_name='',
                                      all_tensors=False,
                                      all_tensor_names=False)
Example #4
def get_ckpt_alltensor_3():
    savedir = './ckpt/'  # note: keep the trailing slash; without it the part after the last slash is treated as a file name, not a directory
    savefile = "model.ckpt"
    print_tensors_in_checkpoint_file(os.path.join(savedir, savefile),  # checkpoint file name
                     None,  # if None, defaults to all variables in the checkpoint
                     True,  # bool: whether to print every tensor (this prints the tensor values); setting it to False here is generally not recommended
                     True)  # bool: whether to print every tensor's name
Example #5
def load_model(sess,
               checkpoint_path,
               root_file,
               show_cp_content=True,
               ignore_missing_vars=False):
    """warm-start the training.
        """

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
        copyfile(root_file, checkpoint_path + '/' + root_file)

    latest_ckpt = tf.train.latest_checkpoint(checkpoint_path)
    if not latest_ckpt:
        return tf.train.Saver()

    print('restore from checkpoint: ' + checkpoint_path)

    with HiddenPrints():
        variables = slim.get_model_variables()  # or: slim.get_variables_to_restore()

    if show_cp_content:
        print()
        print(
            '------------------------------------------------------------------------------'
        )
        print('variables stored in checkpoint:')
        from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
        print_tensors_in_checkpoint_file(latest_ckpt, '', False)
        print(
            '------------------------------------------------------------------------------'
        )

    if ignore_missing_vars:
        reader = tf.train.NewCheckpointReader(latest_ckpt)
        saved_shapes = reader.get_variable_to_shape_map()

        var_names = sorted([(var.name, var.name.split(':')[0])
                            for var in variables
                            if var.name.split(':')[0] in saved_shapes])

        print('nr available vars in the checkpoint: %d' % len(var_names))
        restore_vars = []
        name2var = dict(
            zip(map(lambda x: x.name.split(':')[0], variables), variables))
        with tf.variable_scope('', reuse=True):
            for var_name, saved_var_name in var_names:
                curr_var = name2var[saved_var_name]
                var_shape = curr_var.get_shape().as_list()
                if var_shape == saved_shapes[saved_var_name]:
                    restore_vars.append(curr_var)

        print('nr vars restored: %d' % len(restore_vars))
        saver = tf.train.Saver(restore_vars)
    else:
        saver = tf.train.Saver(variables)

    saver.restore(sess, latest_ckpt)
    return saver
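A minimal usage sketch, assuming a slim model has already been built in the default graph (the paths are hypothetical):

sess = tf.Session()
# restore whatever matches the checkpoint, ignoring newly added variables
saver = load_model(sess,
                   checkpoint_path='./checkpoints',
                   root_file='config.json',
                   show_cp_content=False,
                   ignore_missing_vars=True)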
Example #6
    def load(self,
             saver=None,
             session: tf.compat.v1.Session = None,
             allow_init=True):

        if not session: session = self.session
        if self.verb > 0: print()

        for var in self.vars:
            # look for checkpoint
            latest_filename = 'checkpoint'
            if saver: latest_filename += '_' + saver
            ckpt = tf.train.latest_checkpoint(
                checkpoint_dir=self.save_FD + '/' + var,
                latest_filename=latest_filename) if self.save_FD else None

            if ckpt:
                if self.verb > 1:
                    print(f'\n >>> tensors @ckpt {ckpt}')
                    print_tensors_in_checkpoint_file(file_name=ckpt,
                                                     tensor_name='',
                                                     all_tensors=False)
                self.savers[saver][var].restore(session, ckpt)
                if self.verb > 0:
                    print(
                        f'Variables {var} restored from checkpoint {saver if saver else ""}'
                    )

            else:
                assert allow_init, 'Err: saver load failed: checkpoint not found and allow_init is False'
                session.run(tf.initializers.variables(self.vars[var]))
                if self.verb > 0:
                    print(
                        f'No checkpoint found, variables {var} initialized with default initializer'
                    )
Example #7
def print_weights_in_checkpoint(model_folder, cp_name):
    load_path = os.path.join(model_folder, "{0}.ckpt".format(cp_name))

    print_tensors_in_checkpoint_file(file_name=load_path,
                                     tensor_name="",
                                     all_tensors=True,
                                     all_tensor_names=True)
Example #8
def fn_inspect_checkpoint(ckpt_filepath, **kwargs):
    name = kwargs.get('tensor_name', '')
    # print all tensors only when no specific name was requested
    all_tensors = (name == '')
    chkp.print_tensors_in_checkpoint_file(ckpt_filepath, name, all_tensors)
Example #9
def restore(ckpt_dir, prefix):
    with tf.Session() as sess:
        # load meta
        new_saver = tf.train.import_meta_graph(ckpt_dir + prefix + '.meta')
        # restore model parameters
        latest_checkpoint = tf.train.latest_checkpoint(ckpt_dir)
        print_tensors_in_checkpoint_file(latest_checkpoint, all_tensors=False, all_tensor_names=True, tensor_name='')
        input()

        # checkpoint_path = os.path.join(model_dir, "model.ckpt")
        reader = pywrap_tensorflow.NewCheckpointReader(latest_checkpoint)
        var_to_shape_map = reader.get_variable_to_shape_map()

        for key in var_to_shape_map:
            print("tensor_name: ", key)
            input()
            print(reader.get_tensor(key))  # remove this if you only want to print the variable names

        new_saver.restore(sess, latest_checkpoint)
        # restore variables.
        logits = tf.get_collection("logits")[0]
        visits_weights = tf.get_collection("visits_betas")[0]
        codes_weights = tf.get_collection("codes_alphas")[0]
        preds = tf.get_collection("preds")[0]
        probs = tf.get_collection("probs")[0]
        accuracy = tf.get_collection("accuracy")[0]
        loss = tf.get_collection("cross_entropy")[0]
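pywrap_tensorflow is a private module; recent TensorFlow releases expose the same reader as tf.train.load_checkpoint. A minimal sketch with a hypothetical checkpoint directory:

ckpt_path = tf.train.latest_checkpoint('./ckpt')  # hypothetical directory
reader = tf.train.load_checkpoint(ckpt_path)
for key in reader.get_variable_to_shape_map():
    print("tensor_name: ", key, reader.get_tensor(key).shape)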
Example #10
 def train_step(sess, train_op, loss, summaries, global_step, writer,
                params, saver, steps, loss_sum):
     '''
     Helper function for training the model
     '''
     if params['log_step'] is not None:
         #If it's a logging step
         _, loss_val, sumr, global_step_val = sess.run(
             [train_op, loss, summaries, global_step])
         writer.add_summary(sumr, global_step_val)
         loss_sum += loss_val
         steps += 1
         # log and reset averages
         if global_step_val % params[
                 'log_step'] == 0 or global_step_val == params['steps']:
             tf.logging.info('step: {}; loss: {}'.format(
                 global_step_val, loss_sum / steps))
             save_dir = os.path.join(params['model_dir'], 'model.ckpt')
             saver.save(sess, save_dir)
             loss_sum = 0
             steps = 0
         #Save a checkpoint if this is the last step
         if global_step_val == params[
                 'steps'] or global_step_val == params['steps'] - 1:
             chkp.print_tensors_in_checkpoint_file(os.path.join(
                 params['model_dir'], 'model.ckpt'),
                                                   tensor_name='',
                                                   all_tensors=False,
                                                   all_tensor_names=True)
     else:
         #If it's a non-logging step
         _, sumr, global_step_val = sess.run(
             [train_op, summaries, global_step])
         writer.add_summary(sumr, global_step_val)
     return steps, loss_sum
Example #11
def check_model(model_file):
    from tensorflow.python.tools import inspect_checkpoint as chkp

    # print all tensors in checkpoint file
    chkp.print_tensors_in_checkpoint_file(model_file,
                                          tensor_name='',
                                          all_tensors=True)
Example #12
def find_mbti_type(sentence_chunks, glove_vectors, sess, W1, b1, W2, b2):
    if sess is None:
        sess = tf.Session()
        checkpoint = tf.train.latest_checkpoint('../models')
        saver = tf.train.import_meta_graph(checkpoint + '.meta')
        # initialize first, then restore, so restored values are not overwritten
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, checkpoint)

    print_tensors_in_checkpoint_file(file_name='../models/baseline.ckpt',
                                     tensor_name='',
                                     all_tensors=True)
    results = []
    for chunk in sentence_chunks:
        input = get_embedding_baseline(chunk, glove_vectors)
        x_arr = np.array(input).astype(np.float32)
        x_arr = x_arr.reshape(1, GLOVE_DIMENSION)

        if W1 is None:
            W1 = tf.get_default_graph().get_tensor_by_name("W1:0")
        if b1 is None:
            b1 = tf.get_default_graph().get_tensor_by_name("b1:0")
        if W2 is None:
            W2 = tf.get_default_graph().get_tensor_by_name("W2:0")
        if b2 is None:
            b2 = tf.get_default_graph().get_tensor_by_name("b2:0")

        ymid = tf.matmul(x_arr, W1) + b1
        yhat = tf.add(tf.matmul(ymid, W2), b2)
        predict = tf.argmax(yhat, 1)

        mbti_result = reverse_mbti_index[sess.run(predict)[0]]
        print(mbti_result)
        results.append(mbti_result)
    return results
Example #13
def load_checkpoint(sess, checkpoint_path):
  saver = tf.train.Saver(tf.global_variables())
  print(checkpoint_path)
  ckpt = tf.train.get_checkpoint_state(checkpoint_path)
  tf.logging.info('Loading model %s.', ckpt.model_checkpoint_path)
  saver.restore(sess, ckpt.model_checkpoint_path)
  chkp.print_tensors_in_checkpoint_file(ckpt.model_checkpoint_path, tensor_name='', all_tensors=True)
Example #14
def inspect_tensor_name():
    chkp.print_tensors_in_checkpoint_file(
        file_name=
        "/home/jiatian/project/segdec-net-jim2019/output/segdec_train/KolektorSDD-dilate=5/full-size_cross-entropy/fold_0/model.ckpt-6599",
        tensor_name=None,  # if None, defaults to all variables in the checkpoint
        all_tensors=False,  # bool: whether to print every tensor's value; generally not recommended to set this to False
        all_tensor_names=True)  # bool: whether to print every tensor's name
Example #15
def print_tensor_in_ckpt(ckpt_folder,
                         all_tensor_values=False,
                         all_tensor_names=False):
    """ This function print the list of tensors in checkpoint file.

    Example:
    from GeneralTools.graph_func import print_tensor_in_ckpt
    ckpt_folder = '/home/richard/PycharmProjects/myNN/Results/cifar_ckpt/sngan_hinge_2e-4_nl'
    print_tensor_in_ckpt(ckpt_folder)

    :param ckpt_folder:
    :param all_tensor_values: Boolean indicating whether to print the values of all tensors.
    :param all_tensor_names: Boolean indicating whether to print all tensor names.
    :return:
    """
    from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file

    if not isinstance(ckpt_folder,
                      str):  # if list, use the name of the first file
        ckpt_folder = ckpt_folder[0]

    output_folder = os.path.join(FLAGS.DEFAULT_OUT, ckpt_folder)
    print(output_folder)
    ckpt = tf.train.get_checkpoint_state(output_folder)
    print(ckpt)
    print_tensors_in_checkpoint_file(file_name=ckpt.model_checkpoint_path,
                                     tensor_name='',
                                     all_tensors=all_tensor_values,
                                     all_tensor_names=all_tensor_names)
Example #16
def extract_checkpoint(load_file, save_file, architecture='small'):

  print_tensors_in_checkpoint_file(file_name=load_file, tensor_name='', all_tensors=False)

  c1b, c1k, c2b, c2k, d1b, d1k, d2b, d2k = get_weight_vars(architecture)

  saver = tf.train.Saver()

  with tf.Session() as sess:
    # saver.restore(sess, "models_fmnist/nonprivate_lr005/model.ckpt-14040")
    saver.restore(sess, load_file)
    c1b_mat = c1b.eval()
    c1k_mat = c1k.eval()
    c2b_mat = c2b.eval()
    c2k_mat = c2k.eval()

    d1b_mat = d1b.eval()
    d1k_mat = d1k.eval()
    d2b_mat = d2b.eval()
    d2k_mat = d2k.eval()

  np.savez(save_file,
           c1b=c1b_mat, c1k=c1k_mat,
           c2b=c2b_mat, c2k=c2k_mat,
           d1b=d1b_mat, d1k=d1k_mat,
           d2b=d2b_mat, d2k=d2k_mat)

  dump = np.load(save_file)
  print(dump['c1b'])
Example #17
def _print_checkpt_vars(path):
    # For debugging
    from tensorflow.python.tools.inspect_checkpoint import (
        print_tensors_in_checkpoint_file)
    print_tensors_in_checkpoint_file(path,
                                     all_tensor_names=True,
                                     all_tensors=False,
                                     tensor_name='')
Example #18
def inspect_ckpt_v2(checkpoint):
    # 'all_tensors' indicates whether to print all tensors (include tensor name and value).
    # 'all_tensor_names' indicates whether to print all tensor names.
    # If both arguments are False, then print the tensor names and shapes in the checkpoint file.
    chkp.print_tensors_in_checkpoint_file(checkpoint,
                                          tensor_name='',
                                          all_tensors=False,
                                          all_tensor_names=True)
Example #19
def test_dataSet(model_name, dataset_dir, process_name, batch_size):
    print_tensors_in_checkpoint_file(models_path, None, False, True)
    test_dataset = datasets.get_dataset(process_name, dataset_dir,
                                        'test', batch_size=batch_size)
    test_iterator = test_dataset.make_initializable_iterator()
    test_images, test_labels = test_iterator.get_next()
    labels = np.loadtxt(labels_filename, str, delimiter='\t')
    with tf.name_scope("inputs"):
        is_training = tf.placeholder(tf.bool)
        images = tf.placeholder(
            dtype=tf.float32, shape=[None, resize_height, resize_width, depths], name='inputs')
        labels = tf.placeholder(dtype=tf.int32, shape=[None, class_nums], name='label')

    with tf.name_scope("net"):
        logits,endpoints = net.inference(inputs=images, num_classes=class_nums,
                               is_training=is_training, dropout_keep_prob=1.0)

    # Apply softmax to the logits, then take the class with the highest probability
    with tf.name_scope('Output'):
        score = tf.nn.softmax(logits,name='predict')
        class_id = tf.argmax(score, 1)
        correct_prediction = tf.equal(class_id, tf.argmax(labels, 1))
    saver = tf.train.Saver()
    init_op = [tf.global_variables_initializer(), tf.local_variables_initializer()]

    layer5_mv = tf.get_default_graph().get_tensor_by_name('layer4/conv/conv_2/batch_normalization/moving_mean:0')
    layer5_va = tf.get_default_graph().get_tensor_by_name('layer4/conv/conv_2/batch_normalization/moving_variance:0')

    with tf.Session() as sess:

        sess.run(init_op)
        saver.restore(sess, model_name)
        sess.run(test_iterator.initializer)
        num_correct, num_samples = 0, 0
        while True:
            try:
                test_batch_images, test_batch_labels \
                    = sess.run([test_images, test_labels])
                score_val, correct_pred, class_index \
                    = sess.run([score, correct_prediction, class_id],
                               feed_dict={is_training: False,
                                          images: test_batch_images,
                                          labels: test_batch_labels})
                mv_val, va_val = sess.run([layer5_mv, layer5_va],
                                          feed_dict={is_training: False,
                                                     images: test_batch_images,
                                                     labels: test_batch_labels})

                num_correct += correct_pred.sum()
                num_samples += correct_pred.shape[0]
                # print("mv{0}".format(mv_val))
                # print("va{0}".format(va_val))
                # print(str(score_val)+" " + str(class_index))
            except tf.errors.OutOfRangeError:
                break

        acc = float(num_correct) / num_samples
        return acc
Example #20
def main():
    # ckpt_fn = os.path.join('../official-pretrained', 'model.ckpt')
    ckpt_fn = tf.train.latest_checkpoint('../models/8x8')

    # print all tensors in checkpoint file
    chkp.print_tensors_in_checkpoint_file(ckpt_fn,
                                          tensor_name='',
                                          all_tensors=False)
    return
Example #21
def check_vars_of_ckpt(args, name):
    #We first take in the checkpoint name and directory for either digits, drums or piano
    ckpt_name = os.path.join(args.model_path, args.sname1, name + '.ckpt')

    # Built-in helper for printing the variables contained in the checkpoint file;
    # with all_tensors=True it prints values too, and tensor_name can select a specific tensor
    print_tensors_in_checkpoint_file(ckpt_name,
                                     all_tensors=True,
                                     tensor_name='')
Example #22
 def inspect_checkpoint(inspect_checkpoint_path):
     """ Print nodes in checkpoint"""
     chkp.print_tensors_in_checkpoint_file(
         file_name=inspect_checkpoint_path,
         tensor_name=None,  # if None, defaults to all variables in the checkpoint
         all_tensors=False,  # bool: whether to print every tensor's value; generally not recommended to set this to False
         all_tensor_names=False)  # bool: whether to print every tensor's name
Example #23
 def restore(self, res_file):
     from tensorflow.python.tools.inspect_checkpoint import print_tensors_in_checkpoint_file
     print_tensors_in_checkpoint_file(res_file, None, False)
     with self.graph.as_default():
         for v in tf.global_variables():
             print(v)
         if self.saver is None:
             self.saver = tf.train.Saver(tf.global_variables())
         self.saver.restore(self.sess, res_file)
Example #24
def dump_checkpoint_file(file):
    reader = tf.train.NewCheckpointReader(file)
    var_to_shape_map = reader.get_variable_to_shape_map()
    print('file: %s' % file)
    print('shape_map: %s' % var_to_shape_map)
    print_tensors_in_checkpoint_file(file_name=file,
                                     tensor_name='',
                                     all_tensors=True)
    print('\n')
Example #25
def eval_once(saver, summary_writer, batch_predictions, batch_labels,
              summary_op):
    """Run Eval once.
    Args:
    saver: Saver.
    summary_writer: Summary writer.
    loss: From the prediction
    summary_op: Summary op.
    """
    # Get the loss of mean of batch images
    with tf.variable_scope("mean_loss_eval"):
        mean_loss = comet_dnn.loss(batch_predictions, batch_labels)

    # Get physics prediction
    predictions = tf.squeeze(batch_predictions)
    residual = predictions - batch_labels[:, 0]
    # Add summary
    tf.summary.histogram('/residual', residual)
    # define global number of images
    eval_index = 0

    # By default this returns the latest checkpoint
    ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
    # Get path to the check point
    path_to_ckpt = ckpt.model_checkpoint_path

    global_step = path_to_ckpt.split('/')[-1].split('-')[-1]
    eval_index = int(global_step)

    with tf.Session() as sess:
        # Check that the checkpoint and its path exist
        if ckpt and path_to_ckpt:
            # Restores from checkpoint
            saver.restore(sess, path_to_ckpt)
            # Assuming model_checkpoint_path looks something like:
            #   /my-favorite-path/comet_dnn_train/model.ckpt-0,
            # extract global_step from it.

            print_tensors_in_checkpoint_file(file_name=path_to_ckpt,
                                             tensor_name="",
                                             all_tensors=False,
                                             all_tensor_names=False)
            # Open summary
            print(eval_index)
            summary = tf.Summary()
            summary.ParseFromString(sess.run(summary_op))
            # Add value to summary
            loss = sess.run(mean_loss)
            summary.value.add(tag='pt_residual @ 1', simple_value=loss)
            #print("%d mean_loss %f " % (eval_index,loss))
            # Add summary
            summary_writer.add_summary(summary, eval_index)
            eval_index = eval_index + 1
        else:
            print('No checkpoint file found')
            return
Example #26
def get_vars():
    """
    Print all variables in the checkpoint path.
    Returns:
        None
    """
    latest_ckp = tf.train.latest_checkpoint(os.path.join('../models', '117M'))
    print_tensors_in_checkpoint_file(latest_ckp,
                                     all_tensors=True,
                                     tensor_name='')
Example #27
def get_ckpt_vars(path, path_is_folder=True, name='', with_data=False):
    if path_is_folder:  # pick the latest checkpoint
        path = tf.train.latest_checkpoint(path)
    with io.StringIO() as buf, redirect_stdout(buf):
        inspect_checkpoint.print_tensors_in_checkpoint_file(
            path, name, with_data)
        entries = buf.getvalue().split('\n')
        entries = filter(None, entries)
        entries = [e.split(' ') for e in entries]
        return entries
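A minimal usage sketch (the directory is hypothetical); each printed line typically looks like "name (DT_FLOAT) [shape]", so the first token of every entry is the tensor name:

for entry in get_ckpt_vars('./ckpt'):
    print(entry[0])  # tensor name; remaining tokens describe dtype and shape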
Example #28
def print_tensors(checkfile):
    print_tensors_in_checkpoint_file(checkfile,
                                     all_tensors=True,
                                     tensor_name='')

    reader = pywrap_tensorflow.NewCheckpointReader(checkfile)
    emb = reader.get_tensor("word_embedding/w")
    print("Embedding")
    print(emb)
    print(emb.shape)
Example #29
def main(_):
    from tensorflow.python.tools.inspect_checkpoint import \
        print_tensors_in_checkpoint_file
    from slim_start import get_starter
    name = FLAGS.name
    starter = get_starter(name)
    latest_ckp = starter.get_checkpoint()
    print_tensors_in_checkpoint_file(latest_ckp,
                                     tensor_name='',
                                     all_tensors=False,
                                     all_tensor_names=True)
Example #30
def test_mobilenet_v1():
    batch_size = 5
    height, width = 224, 224
    num_classes = 1001

    img = cv2.imread('laska.png')
    img_resize = cv2.resize(img, (height, width))
    img_rgb = cv2.cvtColor(img_resize, cv2.COLOR_BGR2RGB).astype(np.float32)
    # input_mat = np.random.rand(batch_size, height, width, 3).astype(np.float32)
    input_mat = np.zeros(shape=(batch_size, height, width, 3),
                         dtype=np.float32)
    # input_mat[0, :, :, :] = (img_rgb - 127) / 127.0
    input_mat[0, :, :, :] = img_rgb / 255.0
    print('input_mat[0]:', input_mat[0, :, :, :])
    print('input_mat[1]:', input_mat[1, :, :, :])

    # inputs = tf.random_uniform((batch_size, height, width, 3), name='input')
    inputs = tf.convert_to_tensor(input_mat, name='input', dtype=tf.float32)

    arg_scope = mobilenet_v1.mobilenet_v1_arg_scope(is_training=False,
                                                    weight_decay=0.0)
    with slim.arg_scope(arg_scope):
        logits, end_points = mobilenet_v1.mobilenet_v1(inputs,
                                                       num_classes,
                                                       is_training=False)

    model_dir = '/Users/alexwang/data/mobilenet_v1_1.0_160'
    checkpoint_path = os.path.join(model_dir, 'mobilenet_v1_1.0_160.ckpt')
    print_tensors_in_checkpoint_file(checkpoint_path, None, False)
    saver = tf.train.Saver()

    # print all node in graph
    # for tensor in tf.get_default_graph().as_graph_def().node:
    #     print(tensor.name)

    input_get = tf.get_default_graph().get_tensor_by_name('input:0')
    print('shape of input_get:{}'.format(input_get.shape))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        predict = sess.run([end_points['Predictions']])
        print(predict)
        classes = np.argmax(predict[0], axis=1)
        print(classes)

        saver.restore(sess, checkpoint_path)
        predict = sess.run([end_points['Predictions']])[0]
        print('predict:', predict)
        classes = np.argsort(predict, axis=1)
        for predict_result in classes:
            # print(predict_result[::-1][0:5])
            class_names = [(label_names[i], predict[0][i])
                           for i in predict_result[::-1][0:5]]
            print(class_names)
Example #31
def estimate(estimate_x):
    """ 预估
    :param estimate_x:
    :return: 无
    """
    saver = tf.train.Saver()

    print(estimate_x)

    # Load the latest model
    print_tensors_in_checkpoint_file(FLAGS.model_file_dir, None, True)
    with tf.Session() as sess:
        saver.restore(sess, FLAGS.model_file_dir)
        print("z=", sess.run(z, feed_dict={x: estimate_x}))

    # Load from the checkpoint state
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state(os.path.abspath(FLAGS.model_dir))
        if ckpt and ckpt.all_model_checkpoint_paths:  # the latest model is ckpt.model_checkpoint_path
            print_tensors_in_checkpoint_file(ckpt.all_model_checkpoint_paths[0], None, True)
            saver.restore(sess, ckpt.all_model_checkpoint_paths[0])
            print("z=", sess.run(z, feed_dict={x: estimate_x}))
Example #32
def return_variables(self):
    chkp.print_tensors_in_checkpoint_file(self.variables_path + self.name, tensor_name='Weights', all_tensors=True)
Example #33
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=C0103,C0111

import argparse
import os

from tensorflow.python.tools import inspect_checkpoint as chkp

parser = argparse.ArgumentParser(description="Print data in tensorflow checkpoint file.")
parser.add_argument("global_step", help="global step of the checkpoint file to print")
parser.add_argument("-n", metavar="tensor_name", default="",
                    help="name of the tensor in the checkpoint file to print "
                         "or print all tensors if not provided.")
args = parser.parse_args()

file_name = os.path.join("logs", "solver-net-{}".format(args.global_step))
tensor_name = args.n
all_tensors = args.n == ""

chkp.print_tensors_in_checkpoint_file(file_name, tensor_name=tensor_name, all_tensors=all_tensors)
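A hypothetical invocation, assuming the script is saved as print_ckpt.py and a checkpoint exists at logs/solver-net-1000:

# python print_ckpt.py 1000                  # print all tensors
# python print_ckpt.py 1000 -n dense/kernel  # print only the tensor "dense/kernel"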
PATH_TO_MODEL = os.path.join(PATH_TO_CKPT, MODEL_VERSION)

PATH_TO_WEIGHTS = 'numpy_weights'
PATH_TO_CONV1 = os.path.join(PATH_TO_WEIGHTS, 'conv1.weights.npz')
PATH_TO_CONV1_BIAS = os.path.join(PATH_TO_WEIGHTS, 'conv1.bias.npz')
PATH_TO_PRIMARY_CAPS = os.path.join(PATH_TO_WEIGHTS, 'primary_caps.weights.npz')
PATH_TO_PRIMARY_CAPS_BIAS = os.path.join(PATH_TO_WEIGHTS, 'primary_caps.bias.npz')
PATH_TO_DIGIT_CAPS = os.path.join(PATH_TO_WEIGHTS, 'digit_caps.weights.npz')
PATH_TO_FULLY_CONNECTED1 = os.path.join(PATH_TO_WEIGHTS, 'fully_connected1.weights.npz')
PATH_TO_FULLY_CONNECTED2 = os.path.join(PATH_TO_WEIGHTS, 'fully_connected2.weights.npz')
PATH_TO_FULLY_CONNECTED3 = os.path.join(PATH_TO_WEIGHTS, 'fully_connected3.weights.npz')
PATH_TO_FULLY_CONNECTED1_BIAS = os.path.join(PATH_TO_WEIGHTS, 'fully_connected1.bias.npz')
PATH_TO_FULLY_CONNECTED2_BIAS = os.path.join(PATH_TO_WEIGHTS, 'fully_connected2.bias.npz')
PATH_TO_FULLY_CONNECTED3_BIAS = os.path.join(PATH_TO_WEIGHTS, 'fully_connected3.bias.npz')

print_tensors_in_checkpoint_file(file_name=PATH_TO_MODEL, tensor_name='', all_tensors=False)

sess = tf.Session()
new_saver = tf.train.import_meta_graph(PATH_TO_MODEL + '.meta')
new_saver.restore(sess, tf.train.latest_checkpoint(PATH_TO_CKPT))

# Conv1_layer/Conv/weights (DT_FLOAT) [9,9,1,256]
weights = sess.run('Conv1_layer/Conv/weights:0')
with open(PATH_TO_CONV1, 'wb') as outfile:
    np.save(outfile, weights)

# Conv1_layer/Conv/biases (DT_FLOAT) [256]
bias = sess.run('Conv1_layer/Conv/biases:0')
with open(PATH_TO_CONV1_BIAS, 'wb') as outfile:
    np.save(outfile, bias)
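A short hedged sketch of reading the exported arrays back outside TensorFlow (paths as defined above):

# np.load detects the .npy format regardless of the .npz file name
conv1_w = np.load(PATH_TO_CONV1)
conv1_b = np.load(PATH_TO_CONV1_BIAS)
print(conv1_w.shape, conv1_b.shape)  # expected: (9, 9, 1, 256) and (256,)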