Example #1
def tf_saved_model():
    tmp1 = next_tbd_dir()
    export_dir = os.path.join(tmp1, 'export')
    np1 = np.random.rand(3, 10)
    with tf.Graph().as_default() as tfG1:
        x_in = tf.placeholder(tf.float32, shape=[None, 10])
        x = tf.layers.dense(x_in, 20, name='dense0')
        x = tf.nn.relu(x)
        logits = tf.layers.dense(x, 10, name='logits')

    with tf.Session(graph=tfG1) as sess:
        sess.run(tf.global_variables_initializer())
        np2 = sess.run(logits, feed_dict={x_in: np1})
        tf.saved_model.simple_save(sess,
                                   export_dir,
                                   inputs={'x': x_in},
                                   outputs={'logits': logits})

    # saved_model_cli show --dir ../tbd01/tbd51023/export
    tfG2 = tf.Graph()
    with tf.Session(graph=tfG2) as sess:
        _ = tf.saved_model.loader.load(sess,
                                       [tf.saved_model.tag_constants.SERVING],
                                       export_dir)
        tf1 = tfG2.get_tensor_by_name('Placeholder:0')
        tf2 = tfG2.get_tensor_by_name('logits/BiasAdd:0')
        np3 = sess.run(tf2, feed_dict={tf1: np1})
    print('tf_saved_model:: tf vs tf: ', hfe_r5(np2, np3))
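None of the examples on this page show their imports or the small shared utilities (next_tbd_dir, hfe, hfe_r5; later examples also use roc_auc_score and SVC from scikit-learn). The sketch below is a guess at those utilities based on how they are used, not the original implementations:

import os
import tempfile
import numpy as np
import tensorflow as tf
from sklearn.metrics import roc_auc_score
from sklearn.svm import SVC

def next_tbd_dir():
    # a fresh scratch directory for checkpoints, exports and events files
    return tempfile.mkdtemp(prefix='tbd')

def hfe(x, y, eps=1e-5):
    # max relative error between two arrays (or scalars)
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    return np.max(np.abs(x - y) / (np.abs(x) + np.abs(y) + eps))

def hfe_r5(x, y):
    # relative error rounded to five decimals, convenient for printing
    return round(hfe(x, y), 5)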
Example #2
def tf_estimator_auc_one_labels(N0=1000):
    def _estimator_model_fn(features, labels, mode, params=None):
        tfvar1 = tf.get_variable('tfvar1', shape=[], dtype=tf.float32)
        loss = tf.math.reduce_sum(features + tfvar1) * 0

        if mode == tf.estimator.ModeKeys.EVAL:
            auc = tf.metrics.auc(labels, features)
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              eval_metric_ops={'auc': auc})

        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.GradientDescentOptimizer(1e-10)
            train_op = optimizer.minimize(loss, tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              train_op=train_op)

    np1 = np.random.randint(0, 2, size=(N0, ), dtype=np.int32)
    tmp1 = np1 / 2 - 0.25 + np.random.rand(N0)
    np2 = ((tmp1 - tmp1.min()) / (tmp1.max() - tmp1.min())).astype(np.float32)
    auc = roc_auc_score(np1, np2)

    model_dir = next_tbd_dir()
    ds1 = lambda: tf.data.Dataset.from_tensor_slices((np2, np1)).batch(2)
    train_config = tf.estimator.RunConfig(save_checkpoints_secs=20 * 60,
                                          keep_checkpoint_max=10)
    DNN = tf.estimator.Estimator(_estimator_model_fn, model_dir, train_config)
    DNN.train(ds1, steps=1)
    tmp1 = DNN.evaluate(ds1)
    print('auc1:: np vs tf.estimator: ', hfe_r5(auc, tmp1['auc']))
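tf.metrics.auc is a streaming metric backed by local variables, which is why the estimator reports it only after evaluate() has walked the whole dataset; it also approximates the ROC curve with a fixed number of thresholds (200 by default), so it matches roc_auc_score only approximately. A stand-alone sketch of the same comparison, assuming arrays like np1 / np2 from the function above:

with tf.Graph().as_default():
    labels = tf.placeholder(tf.int32, shape=[None])
    scores = tf.placeholder(tf.float32, shape=[None])
    auc_value, auc_update = tf.metrics.auc(labels, scores)
    with tf.Session() as sess:
        sess.run(tf.local_variables_initializer())  # the metric state lives in local variables
        sess.run(auc_update, feed_dict={labels: np1, scores: np2})
        print('streaming auc: ', sess.run(auc_value))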
Example #3
def _test_tf_load_graph_from_meta():
    meta_file = hf_data('model.ckpt-2999.meta')
    logdir = next_tbd_dir()
    tfG = tf_load_graph_from_meta(meta_file)
    print('{} has been loaded'.format(meta_file))
    tf_save_graph(tfG, logdir)
    print('loaded graph has been saved to {}'.format(logdir))
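tf_load_graph_from_meta and hf_data are not defined on this page; hf_data presumably resolves the path of a bundled test checkpoint, and the loader is presumably a thin wrapper around tf.train.import_meta_graph, roughly:

def tf_load_graph_from_meta(meta_file):
    # import the GraphDef stored in the .meta file into a fresh graph
    # (only the graph structure, not the variable values)
    with tf.Graph().as_default() as tfG:
        tf.train.import_meta_graph(meta_file)
    return tfG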
Example #4
def _test_tf_save_graph():
    logdir = next_tbd_dir()
    with tf.Graph().as_default() as tfG:
        tf1 = tf.constant(0, dtype=tf.float32, shape=[4], name='tf1')
        tfvar1 = tf.get_variable('tfvar1', shape=[3, 4], dtype=tf.float32)
        _ = tf1 * tfvar1
    tf_save_graph(tfG, logdir)
    print('graph has been saved to {}'.format(logdir))
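tf_save_graph is likewise assumed; a minimal implementation consistent with how it is used writes the graph into an events file so it can be inspected with TensorBoard:

def tf_save_graph(tfG, logdir):
    # serialize the graph into an events file under logdir
    tf.summary.FileWriter(logdir, graph=tfG).close()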
Example #5
def tf_var_length_tfrecords(N0=100, min_len=3, max_len=7):
    logdir = next_tbd_dir()
    hf_file = lambda *x: os.path.join(logdir, *x)
    hf_str = lambda: ''.join([chr(x) for x in np.random.randint(97, 123, size=[np.random.randint(3, 10)])])
    X1_len = np.random.randint(min_len, max_len, size=(N0,))
    X1 = [np.random.randint(0, 100, size=[x]) for x in X1_len]
    X2_len = np.random.randint(min_len, max_len, size=(N0,))
    X2 = [np.random.rand(x) for x in X2_len]
    X3_len = np.random.randint(min_len, max_len, size=(N0,))
    X3 = [[hf_str() for _ in range(x)] for x in X3_len]
    tfrecords_file = hf_file('test01.tfrecords')

    # write
    with tf.python_io.TFRecordWriter(tfrecords_file) as writer:
        for ind1 in range(N0):
            example = tf.train.Example(features=tf.train.Features(feature={
                'X1_len': _int64_feature(X1_len[ind1]),
                'X1': _int64_list_feature(X1[ind1]),
                'X2_len': _int64_feature(X2_len[ind1]),
                'X2': _float_list_feature(X2[ind1]),
                'X3_len': _int64_feature(X3_len[ind1]),
                'X3': _bytes_list_feature([x.encode() for x in X3[ind1]]),
            }))
            writer.write(example.SerializeToString())

    # read
    def tf_decode_tfrecords(example_proto):
        example_fmt = {
            'X1_len': tf.FixedLenFeature([], tf.int64),
            'X1': tf.VarLenFeature(tf.int64),
            'X2_len': tf.FixedLenFeature([], tf.int64),
            'X2': tf.VarLenFeature(tf.float32),
            'X3_len': tf.FixedLenFeature([], tf.int64),
            'X3': tf.VarLenFeature(tf.string),
        }
        ret = tf.parse_single_example(example_proto, features=example_fmt)
        X1 = tf.sparse_to_dense(ret['X1'].indices, [ret['X1_len']], ret['X1'].values)
        X2 = tf.sparse_to_dense(ret['X2'].indices, [ret['X2_len']], ret['X2'].values)
        X3 = tf.sparse_to_dense(ret['X3'].indices, [ret['X3_len']], ret['X3'].values, '')
        return X1, X2, X3

    ds1 = tf.data.TFRecordDataset(tfrecords_file).map(tf_decode_tfrecords)
    tf1 = ds1.make_one_shot_iterator().get_next()

    with tf.Session() as sess:
        X1_, X2_, X3_ = zip(*[sess.run(tf1) for _ in range(N0)])

    tmp1 = all(hfe(x, y) < 1e-5 for x, y in zip(X1, X1_))
    print('X1 all equal: ', tmp1)
    tmp1 = all(hfe(x, y) < 1e-5 for x, y in zip(X2, X2_))
    print('X2 all equal: ', tmp1)
    tmp1 = all(all(y1 == y2.decode() for y1, y2 in zip(x1, x2)) for x1, x2 in zip(X3, X3_))
    print('X3 all equal: ', tmp1)
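The _*_feature wrappers used to build tf.train.Example are not shown either; they are presumably the usual one-liners around tf.train.Feature, along these lines:

def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[int(value)]))

def _int64_list_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[int(x) for x in value]))

def _float_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=[float(value)]))

def _float_list_feature(value):
    return tf.train.Feature(float_list=tf.train.FloatList(value=[float(x) for x in value]))

def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))

def _bytes_list_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=list(value)))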
Example #6
def tf_write_read_tfrecords(N0=100):
    logdir = next_tbd_dir()
    hf_file = lambda *x: os.path.join(logdir, *x)
    X1 = np.random.randint(0, 3, size=(N0,))
    X2 = np.random.randint(0, 3, size=(N0,2))
    X3 = np.random.rand(N0).astype(np.float32)
    X4 = np.random.rand(N0,2).astype(np.float32)
    X5 = [str(x) for x in range(N0)]
    X6 = [(str(x),str(x+1)) for x in range(N0)]
    tfrecords_file = hf_file('test01.tfrecords')

    # write tfrecords
    with tf.python_io.TFRecordWriter(tfrecords_file) as writer:
        for ind1 in range(N0):
            example = tf.train.Example(features=tf.train.Features(feature={
                'X1': _int64_feature(X1[ind1]),
                'X2': _int64_list_feature(X2[ind1]),
                'X3': _float_feature(X3[ind1]),
                'X4': _float_list_feature(X4[ind1]),
                'X5': _bytes_feature(X5[ind1].encode()),
                'X6':_bytes_list_feature([x.encode() for x in X6[ind1]]),
            }))
            writer.write(example.SerializeToString())

    # read tfrecords
    def ds_decode_tfrecords(example_proto):
        example_fmt = {
            'X1': tf.FixedLenFeature([], tf.int64),
            'X2': tf.FixedLenFeature([2], tf.int64),
            'X3': tf.FixedLenFeature([], tf.float32),
            'X4': tf.FixedLenFeature([2], tf.float32),
            'X5': tf.FixedLenFeature([], tf.string),
            'X6': tf.FixedLenFeature([2], tf.string)
        }
        ret = tf.parse_single_example(example_proto, features=example_fmt)
        return ret['X1'],ret['X2'],ret['X3'],ret['X4'],ret['X5'],ret['X6']

    ds1 = tf.data.TFRecordDataset(tfrecords_file).map(ds_decode_tfrecords)
    tf1 = ds1.make_one_shot_iterator().get_next()

    with tf.Session() as sess:
        X1_,X2_,X3_,X4_,X5_,X6_ = zip(*[sess.run(tf1) for _ in range(N0)])

    print('X1 error: ', hfe_r5(X1, np.array(X1_)))
    print('X2 error: ', hfe_r5(X2, np.array(X2_)))
    print('X3 error: ', hfe_r5(X3, np.array(X3_)))
    print('X4 error: ', hfe_r5(X4, np.array(X4_)))
    print('X5 all equal: ', all([x==y.decode() for x,y in zip(X5,X5_)]))
    tmp1 = all([all([y1==y2.decode() for y1,y2 in zip(x1,x2)]) for x1,x2 in zip(X6, X6_)])
    print('X6 all equal: ', tmp1)
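Independently of tf.data, the written file can be sanity-checked record by record with tf.python_io.tf_record_iterator; a small sketch, assuming tfrecords_file from above is in scope:

record_iter = tf.python_io.tf_record_iterator(tfrecords_file)
example = tf.train.Example.FromString(next(record_iter))
# each field comes back as a repeated protobuf value, e.g. a single int for X1
print(example.features.feature['X1'].int64_list.value)
print(example.features.feature['X5'].bytes_list.value)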
Example #7
def _test_save_load_model(N0=10, N1=4, N2=3):
    tmp1 = next_tbd_dir()
    hf_file = lambda *x, dir0=tmp1: os.path.join(dir0, *x)
    path = hf_file('test01.pkl')  # the file extension doesn't matter

    np1 = np.random.rand(N0, N1)
    np2 = np.random.randint(0, N2, size=[N0])
    clf = SVC(probability=True)
    clf.fit(np1, np2)
    np3 = clf.predict_proba(np1)

    sklearn_save_model00(clf, path)
    np4 = sklearn_load_model00(path).predict_proba(np1)
    print('sklearn_save_model00: ', hfe_r5(np3, np4))

    sklearn_save_model01(clf, path)
    np4 = sklearn_load_model01(path).predict_proba(np1)
    print('sklearn_save_model01: ', hfe_r5(np3, np4))
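The sklearn_save_model00 / sklearn_load_model00 / sklearn_save_model01 / sklearn_load_model01 helpers are not defined here. A plausible sketch is a pickle-based pair plus a joblib-based pair (on older scikit-learn versions joblib is imported as from sklearn.externals import joblib):

import pickle
import joblib

def sklearn_save_model00(model, path):
    with open(path, 'wb') as fid:
        pickle.dump(model, fid)

def sklearn_load_model00(path):
    with open(path, 'rb') as fid:
        return pickle.load(fid)

def sklearn_save_model01(model, path):
    # joblib handles the large numpy arrays inside fitted estimators more efficiently
    joblib.dump(model, path)

def sklearn_load_model01(path):
    return joblib.load(path)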
Example #8
def tf_estimator_auc_multi_labels(N0=1000, N1=10):
    def _estimator_model_fn(features, labels, mode, params=None):
        tfvar1 = tf.get_variable('tfvar1', shape=[], dtype=tf.float32)
        loss = tf.math.reduce_sum(features + tfvar1) * 0

        if mode == tf.estimator.ModeKeys.EVAL:
            tmp1 = [x[:, 0] for x in tf.split(labels, N1, axis=1)]
            tmp2 = [x[:, 0] for x in tf.split(features, N1, axis=1)]
            auc_i = {
                'auc_' + str(ind1): tf.metrics.auc(x, y)
                for ind1, (x, y) in enumerate(zip(tmp1, tmp2))
            }
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              eval_metric_ops=auc_i)

        if mode == tf.estimator.ModeKeys.TRAIN:
            optimizer = tf.train.GradientDescentOptimizer(1e-10)
            train_op = optimizer.minimize(loss, tf.train.get_global_step())
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              train_op=train_op)

    np1 = np.random.randint(0, 2, size=(N0, N1), dtype=np.int32)
    tmp1 = np1 / 2 - 0.25 + np.random.rand(N0, N1)
    np2 = ((tmp1 - tmp1.min(0)) / (tmp1.max(0) - tmp1.min(0))).astype(
        np.float32)
    auc = np.array(
        [roc_auc_score(np1[:, ind1], np2[:, ind1]) for ind1 in range(N1)])

    model_dir = next_tbd_dir()
    ds1 = lambda: tf.data.Dataset.from_tensor_slices((np2, np1)).batch(2)
    train_config = tf.estimator.RunConfig(save_checkpoints_secs=20 * 60,
                                          keep_checkpoint_max=10)
    DNN = tf.estimator.Estimator(_estimator_model_fn, model_dir, train_config)
    DNN.train(ds1, steps=1)
    tmp1 = DNN.evaluate(ds1)
    tmp2 = np.array([tmp1['auc_' + str(i)] for i in range(N1)])
    print('auc_i:: np vs tf.estimator: ', hfe_r5(auc, tmp2))
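A side note on the split-and-index pattern above: taking x[:, 0] of each piece of tf.split(..., N1, axis=1) yields the same per-column tensors as tf.unstack, which reads a little more directly:

tmp1 = tf.unstack(labels, num=N1, axis=1)    # N1 tensors of shape [batch]
tmp2 = tf.unstack(features, num=N1, axis=1)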
Example #9
def tf_init_using_hook(N0=1000, N1=5000):
    logdir = next_tbd_dir()
    hf_file = lambda *x, dir0=logdir: os.path.join(dir0, *x)
    np1 = np.random.rand(N0, N1).astype(np.float32)

    def model_fn(features, labels, mode, params):
        tfvar1 = tf.get_variable('tfvar1', dtype=tf.float32, shape=[N0, N1])
        predict = tfvar1**2
        if mode == tf.estimator.ModeKeys.PREDICT:
            return tf.estimator.EstimatorSpec(mode,
                                              predictions={'predict': predict})
        loss = tf.math.reduce_sum(predict)
        if mode == tf.estimator.ModeKeys.EVAL:
            mae = tf.metrics.mean_absolute_error(predict, predict)
            return tf.estimator.EstimatorSpec(mode,
                                              loss=loss,
                                              eval_metric_ops={'mae': mae})
        optimizer = tf.train.GradientDescentOptimizer(params['lr'])
        with tf.control_dependencies(tf.get_collection(
                tf.GraphKeys.UPDATE_OPS)):
            train_op = optimizer.minimize(loss,
                                          tf.train.get_or_create_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    fake_ckpt = hf_file('fake_ckpt', 'model.ckpt')
    _create_fake_ckpt(fake_ckpt, {'tfvar1': np1})
    tmp1 = np.arange(10), np.arange(10)
    ds_train = lambda: tf.data.Dataset.from_tensor_slices(tmp1)
    params = {'lr': 1}
    DNN = tf.estimator.Estimator(model_fn,
                                 logdir,
                                 config=tf.estimator.RunConfig(),
                                 params=params)
    hook1 = _MyHook1(fake_ckpt)
    DNN.train(ds_train, steps=1, hooks=[hook1])
    print('tf_init_using_hook before:: np vs tf: ',
          hfe_r5(np1, hook1._tfvar1_))
    tmp1 = np1 - np1 * 2 * params['lr']
    print('tf_init_using_hook after:: np vs tf: ', hfe_r5(tmp1, hook1.tfvar1_))
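_create_fake_ckpt and _MyHook1 are assumed helpers that are not shown on this page. A minimal sketch consistent with how they are used: the first writes a throw-away checkpoint containing the given arrays, the second is a tf.train.SessionRunHook that restores tfvar1 from that checkpoint right after the session is created and records its value before (_tfvar1_) and after (tfvar1_) the single training step; the originals may well differ.

def _create_fake_ckpt(ckpt_path, name_to_value):
    os.makedirs(os.path.dirname(ckpt_path), exist_ok=True)
    with tf.Graph().as_default():
        for name, value in name_to_value.items():
            tf.get_variable(name, dtype=tf.float32, initializer=value)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver.save(sess, ckpt_path)

class _MyHook1(tf.train.SessionRunHook):
    def __init__(self, ckpt_path):
        self._ckpt_path = ckpt_path
    def begin(self):
        # the estimator graph is still mutable here, so build the restore op now
        self._tfvar1 = [v for v in tf.global_variables() if v.name == 'tfvar1:0'][0]
        self._saver = tf.train.Saver({'tfvar1': self._tfvar1})
    def after_create_session(self, session, coord):
        # overwrite the default random initialization with the fake checkpoint
        self._saver.restore(session, self._ckpt_path)
        self._tfvar1_ = session.run(self._tfvar1)  # value before the train step
    def end(self, session):
        self.tfvar1_ = session.run(self._tfvar1)  # value after the train step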
Example #10
def tf_events_filesize_whether_init_from_ckpt(N0=1000, N1=5000):
    logdir = next_tbd_dir()
    hf_file = lambda *x, dir0=logdir: os.path.join(dir0, *x)
    np1 = np.random.rand(N0, N1).astype(np.float32)

    # init_from_initializer
    dir1 = hf_file('init_from_initializer')
    with tf.Graph().as_default() as tfG1:
        tfG1_tfvar1 = tf.get_variable('tfG1_tfvar1',
                                      dtype=tf.float32,
                                      initializer=np1)
    with tf.Session(graph=tfG1) as sess:
        tf.summary.FileWriter(dir1, tfG1).close()
        _ = sess.run(tf.global_variables_initializer())
        tfG1_tfvar1_ = sess.run(tfG1_tfvar1)

    # init_from_ckpt
    dir2 = hf_file('init_from_ckpt')
    fake_ckpt = hf_file('fake_ckpt', 'model.ckpt')
    _create_fake_ckpt(fake_ckpt, {'tfvar1': np1})
    with tf.Graph().as_default() as tfG2:
        tfG2_tfvar1 = tf.get_variable('tfG2_tfvar1',
                                      dtype=tf.float32,
                                      shape=[N0, N1])
        tf.train.init_from_checkpoint(fake_ckpt, {'tfvar1': 'tfG2_tfvar1'})
    with tf.Session(graph=tfG2) as sess:
        tf.summary.FileWriter(dir2, tfG2).close()
        _ = sess.run(tf.global_variables_initializer())
        tfG2_tfvar1_ = sess.run(tfG2_tfvar1)

    print('tf_init_from_initializer:: np vs tf: ', hfe_r5(np1, tfG1_tfvar1_))
    print('tf_init_from_ckpt:: np vs tf: ', hfe_r5(np1, tfG2_tfvar1_))
    hf1 = lambda dir0: os.path.join(
        dir0, [x for x in os.listdir(dir0) if x.startswith('events')][0])
    print('tf_init_from_initializer events file size: ',
          os.path.getsize(hf1(dir1)))
    print('tf_init_from_ckpt events file size: ', os.path.getsize(hf1(dir2)))
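The size difference is expected: with initializer=np1 the full N0*N1 array ends up embedded in the GraphDef as a constant node (and therefore in the events file), while tf.train.init_from_checkpoint only swaps the variable's initializer for a small restore op. A rough confirmation, assuming tfG1 / tfG2 from the function above are still in scope:

print('initializer GraphDef bytes: ', tfG1.as_graph_def().ByteSize())
print('init_from_ckpt GraphDef bytes: ', tfG2.as_graph_def().ByteSize())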
Example #11
def tf_summary_scalar(N0=1000, N1=3):
    log_dir = next_tbd_dir()
    hf_file = lambda *x: os.path.join(log_dir, *x)

    npX = np.random.rand(N0, N1)
    npy = np.random.randint(0, 2, size=(N0, ))

    with tf.Graph().as_default() as tfG:
        ds1 = tf.data.Dataset.from_tensor_slices(
            (npX, npy)).repeat().shuffle(100).batch(32).prefetch(2)
        tfX, tfy = ds1.make_one_shot_iterator().get_next()

        x = tf.layers.dense(tfX,
                            20,
                            kernel_regularizer=tf.nn.l2_loss,
                            name='dense1')
        x = tf.nn.sigmoid(x)
        x = tf.layers.dense(x,
                            20,
                            kernel_regularizer=tf.nn.l2_loss,
                            name='dense2')
        x = tf.nn.sigmoid(x)
        x = tf.layers.dense(x, 1, name='dense3')[:, 0]

        with tf.variable_scope('loss'):
            tmp1 = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=tf.cast(tfy, x.dtype), logits=x)
            CEloss = tf.math.reduce_mean(tmp1, name='CEloss')
            REGloss = 0.01 * tf.math.add_n(
                tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            loss_all = CEloss + REGloss
        tf.summary.scalar('loss/cross_entropy', CEloss)
        tf.summary.scalar('loss/regularizer', REGloss)
        tf.summary.scalar('loss/all', loss_all)

        with tf.variable_scope('accuracy'):
            tmp1 = tf.cast(tf.equal(tf.cast(x > 0.5, tfy.dtype), tfy),
                           tf.float32)
            acc = tf.math.reduce_mean(tmp1, name='acc')
        tf.summary.scalar('acc', acc)

        train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss_all)
        merged = tf.summary.merge_all()

    with tf.Session(graph=tfG) as sess:
        writer = tf.summary.FileWriter(hf_file(), sess.graph)
        sess.run(tf.global_variables_initializer())
        for ind1 in range(1000):
            _, tmp1 = sess.run([train_op, merged])
            writer.add_summary(tmp1, global_step=ind1)

        run_metadata = tf.RunMetadata()
        _, tmp1 = sess.run(
            [train_op, merged],
            run_metadata=run_metadata,
            options=tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE))
        writer.add_run_metadata(run_metadata, 'step1000')
        writer.add_summary(tmp1, 1000)
        writer.close()
    print('run "tensorboard --logdir={}" out of python shell'.format(log_dir))
Example #12
    x = tf.layers.dense(x, 5, name='fc2')
    x = tf.nn.relu(x)
    logits = tf.layers.dense(x, 1, name='logits')[:, 0]
    return logits


def load_ckpt(filename):
    reader = tf.train.NewCheckpointReader(filename)
    return {
        k: reader.get_tensor(k)
        for k in reader.get_variable_to_shape_map()
    }


'''parameters'''
logdir1 = next_tbd_dir()
hf_file1 = lambda *x: os.path.join(logdir1, *x)
logdir2 = next_tbd_dir()
hf_file2 = lambda *x: os.path.join(logdir2, *x)
np_dict = {}
'''first build compute graph, initialize with some random number and save to logdir1'''
with tf.Graph().as_default() as tfG:
    x = tf.placeholder(dtype=tf.float32, name='features', shape=(None, 3))
    _ = compute_graph(x, tf.estimator.ModeKeys.TRAIN)

    z1 = tfG.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    aop = []
    for x in tfG.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
        tmp1 = np.random.uniform(size=x.shape.as_list())
        np_dict[x.name] = tmp1
        aop += [tf.assign(x, tmp1)]