Example #1
def show_histogram(image: tf.Tensor, channel: str, nbins: int,
                   source_range: str, normalize: bool) -> None:
    """
    Given an image tensor, display it side-by-side with it's normalized histogram
    """
    values, centers = histogram(image,
                                nbins=nbins,
                                source_range=source_range,
                                normalize=normalize)

    fig, ax = plt.subplots(ncols=2, figsize=(10, 5))

    # Colormap lookup table so that matplotlib displays the channel as well
    cmap = dict(grey=plt.cm.gray,
                r=plt.cm.Reds,
                g=plt.cm.Greens,
                b=plt.cm.Blues)
    ax[0].imshow(image.eval(), cmap=cmap[channel])
    ax[0].axis('off')

    ax[1].plot(centers.eval(), values.eval(), lw=2)
    # Lookup table for nice titles
    names = dict(grey="grey", r="red", g="green", b="blue")
    ax[1].set_title(f'Histogram of {names[channel]} values')

    plt.tight_layout()
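
A minimal usage sketch, assuming `histogram` is a TensorFlow port of `skimage.exposure.histogram` that returns tensors (the call signature matches), and that everything runs under a TF1-style default session; the image and argument values below are illustrative:

import matplotlib.pyplot as plt
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

image = tf.constant(np.random.randint(0, 256, (64, 64), dtype=np.uint8))
with tf.Session().as_default():  # show_histogram calls .eval() internally
    show_histogram(image, channel='grey', nbins=256,
                   source_range='image', normalize=False)
plt.show()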
Example #2
def update_prototype(feature_map: tf.Tensor,
                     y: tf.Tensor,
                     center: tf.Tensor,
                     ld: float = 5e-2):
    """Build ops that nudge each class prototype toward the batch mean of
    that class's features: new = (1 - ld) * old + ld * batch_mean."""
    v_feature_map = feature_map.eval()
    v_y = np.argmax(y.eval(), axis=1)  # one-hot labels -> class indices
    v_center = center.eval()

    # Group the batch's feature vectors by class label.
    df = pd.DataFrame({
        'y': v_y,
        'feature_map': [v_feature_map[i] for i in range(v_feature_map.shape[0])],
    })
    class_means = df.groupby('y')['feature_map'].apply(np.mean)

    # One assign op per class seen in the batch.
    update_ops = []
    for i in class_means.index:
        update_ops.append(
            tf.assign(center[i], (1 - ld) * v_center[i] + ld * class_means.loc[i]))

    return update_ops
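
A hedged driver for the function above; the shapes, names, and the use of tf.compat.v1 are assumptions for illustration:

import numpy as np
import pandas as pd
import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

num_classes, feature_dim, batch = 3, 4, 8
center = tf.get_variable('center', shape=[num_classes, feature_dim])
features = tf.constant(np.random.randn(batch, feature_dim), tf.float32)
labels = tf.one_hot(np.random.randint(0, num_classes, batch), num_classes)

with tf.Session() as sess:  # also installed as the default session
    sess.run(tf.global_variables_initializer())
    update_ops = update_prototype(features, labels, center)
    sess.run(update_ops)  # applies the EMA update to each prototype seen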
Example #3
def _get_tensor_value(tensor_or_eager_tensor: tf.Tensor) -> Any:
    # In eager mode the tensor already carries its value; in graph mode a
    # temporary session is needed so that .eval() can run.
    if ops.executing_eagerly_outside_functions():
        return np.asarray(tensor_or_eager_tensor)
    else:
        with tf.compat.v1.Session():
            return tensor_or_eager_tensor.eval()
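
Here `ops` is presumably tensorflow.python.framework.ops and `Any` comes from typing. A quick check of the eager path (a sketch):

import tensorflow as tf

print(_get_tensor_value(tf.constant([1.0, 2.0])))  # -> [1. 2.]
# Under TF1-style graph execution, the same call would instead open a
# temporary tf.compat.v1.Session and evaluate the tensor with .eval().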
Example #4
def print_layer(layer: tf.Tensor):
    """Print a layer tensor's name followed by its current value.

    Requires a default session, since it evaluates the tensor."""
    values = layer.eval()
    print('Layer: {:s}'.format(layer.name))
    print(values)
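
A minimal sketch of calling it, assuming TF1-style graph mode:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

layer = tf.constant([[0.1, 0.9]], name='logits')
with tf.Session():        # installs itself as the default session
    print_layer(layer)    # prints "Layer: logits:0" and the values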
Example #5
def train_and_test_model(training_op: tf.Operation, test_tensor: tf.Tensor,
                         global_step: tf.Tensor,
                         batch: IsingFileRecord.IsingBatch,
                         config: GlobalConfig):
    """
    Remember to scope under a default graph
    :param training_op: Runs a training step in the graph.
    Can itself be a no-op
    :param test_tensor: 2D tensor, the direct output layer of the NN
    :param batch: the IsingBatch tensor fed into the *TEST* tensor
    :param config: GlobalConfig
    :return:
    """
    #EVAL_FREQUENCY = int(0.05*FLAGS.max_steps)
    #g = tf.Graph()
    #with g.as_default():
    #global_training_step = tf.Variable(
    #   0, trainable=False, name='global_step')
    ch1 = logging.StreamHandler()
    ch2 = logging.FileHandler('run.log')
    logging.basicConfig(format='%(asctime)s %(message)s',
                        level=logging.DEBUG,
                        handlers=[ch1, ch2])

    last_eval_accuracy = tf.Variable(0,
                                     trainable=False,
                                     name='eval_acc',
                                     dtype=tf.float32)
    next_acc = tf.placeholder(tf.float32, [])
    update_acc = last_eval_accuracy.assign(next_acc)
    acc_sum_op = tf.summary.scalar('Eval_Accuracy', last_eval_accuracy)

    chk_saver = tf.train.Saver()
    sum_writer = tf.summary.FileWriter(FLAGS.train_dir, tf.get_default_graph())
    sum_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START))
    merged_sum = tf.summary.merge_all()
    sess = tf.Session()
    _load_chk_or_init(chk_saver, sess)
    log_freq = config.max_steps // 20
    if config.log_freq is not None:
        log_freq = config.log_freq
    acc = 0.0

    with sess.as_default():
        # initialization: start the coordinator and queue-runner threads
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        # training loop
        for step in range(config.max_steps):
            i = global_step.eval()  # the current global step
            # evaluate
            if i % config.eval_freq == 0:
                eval_result = _eval_once(test_tensor, batch, config, sess)
                acc = eval_result.overall_accuracy
                # update accuracy tensor
                sess.run(update_acc, feed_dict={next_acc: acc})
            # run a training step and collect summaries
            _, summary = sess.run([training_op, merged_sum])
            if i % config.sum_save_freq == 0:
                sum_writer.add_summary(summary, i)
            # log and checkpoint every so often
            if step % log_freq == 0:
                logging.info('Step %d: %f', i, last_eval_accuracy.eval())
                chk_saver.save(sess, FLAGS.train_dir + "/chk")
        # final evaluation
        eval_result = _eval_once(test_tensor, batch, config, sess)
        sess.run(update_acc,
                 feed_dict={next_acc: eval_result.overall_accuracy})
        acc_sum = sess.run(acc_sum_op)
        sum_writer.add_summary(acc_sum, config.max_steps)
        # final checkpoint
        chk_saver.save(sess, FLAGS.train_dir + "/chk")
        coord.request_stop()
        coord.join(threads, stop_grace_period_secs=10)
    sum_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.STOP))
    sum_writer.flush()
    sum_writer.close()
    sess.close()

    return eval_result
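
`_load_chk_or_init` is not shown in this example; a plausible restore-or-initialize implementation (a hedged sketch under the same FLAGS.train_dir convention, not necessarily the author's code) would be:

def _load_chk_or_init(saver: tf.train.Saver, sess: tf.Session) -> None:
    """Restore the latest checkpoint from FLAGS.train_dir if one exists,
    otherwise initialize all variables from scratch."""
    ckpt = tf.train.latest_checkpoint(FLAGS.train_dir)
    if ckpt is not None:
        saver.restore(sess, ckpt)
    else:
        sess.run(tf.global_variables_initializer())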