def train():
    """Train a tweet classifier through the tensorgraph train object."""
    from tensorgraph.trainobject import train as mytrain
    with tf.Session() as sess:
        word_len, sent_len = 20, 50

        # dataset: tweets tokenized into fixed word/sentence windows
        data_X, data_y, nclass = tweets(word_len, sent_len)

        # graph: model() returns the input placeholder plus separate
        # train-mode and test-mode outputs
        in_ph, train_out, test_out = model(word_len, sent_len, nclass)
        label_ph = tf.placeholder('float32', [None, nclass])

        # cross-entropy drives training; accuracy is used for validation
        cost = entropy(label_ph, train_out)
        accu = accuracy(label_ph, test_out)
        opt = tf.train.AdamOptimizer(0.001)

        # negated accuracy acts as a validation cost to be minimised
        mytrain(session=sess,
                feed_dict={in_ph: data_X, label_ph: data_y},
                train_cost_sb=cost,
                valid_cost_sb=-accu,
                optimizer=opt,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Exemplo n.º 2
0
def train_with_trainobject():
    """Train the MNIST model through the tensorgraph train object."""
    from tensorgraph.trainobject import train as mytrain
    with tf.Session() as sess:
        net = model()
        X_train, y_train, X_test, y_test = Mnist(flatten=False,
                                                 onehot=True,
                                                 binary=True,
                                                 datadir='.')
        # 28x28 single-channel images with 10-way one-hot labels
        img_ph = tf.placeholder('float32', [None, 28, 28, 1])
        lbl_ph = tf.placeholder('float32', [None, 10])

        train_out = net.train_fprop(img_ph)
        test_out = net.test_fprop(img_ph)
        cost = entropy(lbl_ph, train_out)
        accu = accuracy(lbl_ph, test_out)

        # negated accuracy serves as the validation cost to be minimised
        mytrain(session=sess,
                feed_dict={img_ph: X_train, lbl_ph: y_train},
                train_cost_sb=cost,
                valid_cost_sb=-accu,
                optimizer=tf.train.AdamOptimizer(0.001),
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Exemplo n.º 3
0
def train_with_trainobject():
    """Train the CIFAR-10 model through the tensorgraph train object."""
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        # infer image geometry and label width from the data itself
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        net = model(nclass=nclass, h=h, w=w, c=c)

        img_ph = tf.placeholder('float32', [None, h, w, c])
        lbl_ph = tf.placeholder('float32', [None, nclass])

        train_out = net.train_fprop(img_ph)
        test_out = net.test_fprop(img_ph)
        cost = entropy(lbl_ph, train_out)
        accu = accuracy(lbl_ph, test_out)

        # negated accuracy serves as the validation cost to be minimised
        mytrain(session=sess,
                feed_dict={img_ph: X_train, lbl_ph: y_train},
                train_cost_sb=cost,
                valid_cost_sb=-accu,
                optimizer=tf.train.AdamOptimizer(0.001),
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Exemplo n.º 4
0
def train_with_Resnet():
    """Train a small ResNet-style network on CIFAR-10 via the train object."""
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))

        # body: identity block -> transition -> identity block -> transition
        net = tg.Sequential()
        blk1 = IdentityBlock(input_channels=c,
                             input_shape=(h, w),
                             nlayers=4,
                             filters=[32, 64])
        net.add(blk1)
        down1 = TransitionLayer(input_channels=blk1.output_channels,
                                input_shape=blk1.output_shape)
        net.add(down1)
        blk2 = IdentityBlock(input_channels=down1.output_channels,
                             input_shape=down1.output_shape,
                             nlayers=4,
                             filters=[64, 128])
        net.add(blk2)
        down2 = TransitionLayer(input_channels=blk2.output_channels,
                                input_shape=blk2.output_shape)
        net.add(down2)

        # classifier head: flatten the final feature map into a linear softmax
        net.add(Flatten())
        flat_dim = down2.output_channels * np.prod(down2.output_shape)
        net.add(Linear(flat_dim, nclass))
        net.add(Softmax())

        img_ph = tf.placeholder('float32', [None, h, w, c])
        lbl_ph = tf.placeholder('float32', [None, nclass])

        train_out = net.train_fprop(img_ph)
        test_out = net.test_fprop(img_ph)
        cost = entropy(lbl_ph, train_out)
        accu = accuracy(lbl_ph, test_out)

        # negated accuracy serves as the validation cost to be minimised
        mytrain(session=sess,
                feed_dict={img_ph: X_train, lbl_ph: y_train},
                train_cost_sb=cost,
                valid_cost_sb=-accu,
                optimizer=tf.train.AdamOptimizer(0.001),
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Exemplo n.º 5
0
def train_with_Densenet():
    """Train a DenseNet classifier on CIFAR-10 via the tensorgraph train object.

    Builds DenseNet -> Flatten -> Linear -> Softmax, prints the global
    variables and the total parameter count, then trains with Adam using
    cross-entropy as the training cost and negated test accuracy as the
    validation cost.
    """
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        seq = tg.Sequential()
        dense = DenseNet(input_channels=c,
                         input_shape=(h, w),
                         ndense=3,
                         growth_rate=4,
                         nlayer1blk=4)
        seq.add(dense)
        seq.add(Flatten())
        # flattened width equals output_channels — assumes DenseNet reduces
        # the spatial dims to 1x1; TODO confirm against the layer definition
        ldim = dense.output_channels
        seq.add(Linear(ldim, nclass))
        seq.add(Softmax())

        X_ph = tf.placeholder('float32', [None, h, w, c])
        y_ph = tf.placeholder('float32', [None, nclass])

        y_train_sb = seq.train_fprop(X_ph)
        y_test_sb = seq.test_fprop(X_ph)
        train_cost_sb = entropy(y_ph, y_train_sb)
        optimizer = tf.train.AdamOptimizer(0.001)
        test_accu_sb = accuracy(y_ph, y_test_sb)

        print(tf.global_variables())
        print('..total number of global variables: {}'.format(
            len(tf.global_variables())))
        # total parameter count: sum of element counts over global variables
        count = sum(int(np.prod(var.get_shape()))
                    for var in tf.global_variables())
        print('..total number of global parameters: {}'.format(count))

        # negated accuracy serves as the validation cost to be minimised
        mytrain(session=sess,
                feed_dict={
                    X_ph: X_train,
                    y_ph: y_train
                },
                train_cost_sb=train_cost_sb,
                valid_cost_sb=-test_accu_sb,
                optimizer=optimizer,
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Exemplo n.º 6
0
def generator_cost(y_ph, real, fake):
    """Generator cost: WGAN-style critic term plus class-conditioning entropy.

    Args:
        y_ph: placeholder of target class labels.
        real: (class_output, judge) pair from the discriminator on real
              samples; unused here, kept for signature symmetry with
              discriminator_cost.
        fake: (class_output, judge) pair from the discriminator on
              generated samples.

    Returns:
        Scalar tensor: -mean(fake_judge) + entropy(y_ph, fake_clss).
    """
    real_clss, real_judge = real  # unpacked for interface symmetry; unused
    fake_clss, fake_judge = fake
    # push the critic's score on fakes up, while keeping generated samples
    # classifiable as the conditioned label
    fake_entropy = entropy(y_ph, fake_clss)
    cost = -tf.reduce_mean(fake_judge) + fake_entropy
    return cost
Exemplo n.º 7
0
def train_with_VGG():
    """Train a VGG19-based classifier on CIFAR-10 via the train object."""
    from tensorgraph.trainobject import train as mytrain
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                                   whiten=False)
        _, h, w, c = X_train.shape
        _, nclass = y_train.shape
        print('X max', np.max(X_train))
        print('X min', np.min(X_train))
        from tensorgraph.layers import VGG19
        # VGG19 feature extractor followed by a linear softmax classifier
        net = tg.Sequential()
        net.add(VGG19(input_channels=c, input_shape=(h, w)))
        net.add(Flatten())
        net.add(Linear(512, nclass))
        net.add(Softmax())

        img_ph = tf.placeholder('float32', [None, h, w, c])
        lbl_ph = tf.placeholder('float32', [None, nclass])

        train_out = net.train_fprop(img_ph)
        test_out = net.test_fprop(img_ph)
        cost = entropy(lbl_ph, train_out)
        accu = accuracy(lbl_ph, test_out)

        # negated accuracy serves as the validation cost to be minimised
        mytrain(session=sess,
                feed_dict={img_ph: X_train, lbl_ph: y_train},
                train_cost_sb=cost,
                valid_cost_sb=-accu,
                optimizer=tf.train.AdamOptimizer(0.001),
                epoch_look_back=5,
                max_epoch=100,
                percent_decrease=0,
                train_valid_ratio=[5, 1],
                batchsize=64,
                randomize_split=False)
Exemplo n.º 8
0
def discriminator_cost(y_ph, real, fake):
    """Discriminator cost: WGAN-style critic term plus real-sample entropy.

    Args:
        y_ph: placeholder of target class labels.
        real: (class_output, judge) pair from the discriminator on real
              samples.
        fake: (class_output, judge) pair from the discriminator on
              generated samples.

    Returns:
        Scalar tensor:
        mean(fake_judge) - mean(real_judge) + entropy(y_ph, real_clss).
    """
    real_clss, real_judge = real
    fake_clss, fake_judge = fake  # fake_clss unused; unpacked for symmetry
    # push real scores up and fake scores down, while classifying real
    # samples to their labels
    real_entropy = entropy(y_ph, real_clss)
    cost = tf.reduce_mean(fake_judge) - tf.reduce_mean(real_judge) + real_entropy
    return cost
Exemplo n.º 9
0
    # NOTE(review): fragment — the enclosing function's def line is not
    # visible in this chunk, so purpose/params are documented inline only.
    # 5-D placeholders — presumably (batch, slices, height, width, 1)
    # volumes for 3-D segmentation; confirm against the data pipeline
    X_ph = tf.placeholder('float32', [None, 83, 256, 256, 1])
    y_ph = tf.placeholder('float32', [None, 83, 256, 256, 1])
    #X_ph = tf.placeholder('float32', [None, None, None, None, 1])
    #y_ph = tf.placeholder('float32', [None, None, None, None, 1])

    # separate train-mode and test-mode outputs from the same model
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)

    #### COST FUNCTION
    #train_cost_sb = tf.reduce_mean((y_ph - y_train_sb)**2)
    #train_cost_sb = entropy(y_ph, y_train_sb)
    # train on a soft-IOU loss: maximising overlap == minimising 1 - iou
    train_cost_sb = 1 - smooth_iou(y_ph, y_train_sb)

    #test_cost_sb = tf.reduce_mean((y_ph - y_test_sb)**2)
    test_cost_sb = entropy(y_ph, y_test_sb)
    #test_accu_sb = accuracy(y_ph, y_test_sb)
    # report thresholded IOU (cutoff 0.2) as the test metric
    test_accu_sb = iou(y_ph, y_test_sb, threshold=0.2)

    print('DONE')

    # learning_rate is defined outside this fragment
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(train_cost_sb)

    # model Saver
    saver = tf.train.Saver()

    # cap GPU memory use at 90% of the device
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
        print("INITIALIZE SESSION")
Exemplo n.º 10
0
    # NOTE(review): fragment — the enclosing function's def line is not
    # visible in this chunk, so purpose/params are documented inline only.
    seq = model3D()
    dataset = WMHdataset('./WMH')
    assert dataset.AbleToRetrieveData(), 'not able to locate the directory of dataset'
    dataset.InitDataset(split=1.0)         # Take everything 100%

    #X_ph = tf.placeholder('float32', [None, 83, 256, 256, 1])
    #y_ph = tf.placeholder('float32', [None, 83, 256, 256, 1])
    # fully dynamic 5-D placeholders — presumably (batch, slices, H, W, 1)
    # volumes of varying size; confirm against the data pipeline
    X_ph = tf.placeholder('float32', [None, None, None, None, 1])
    y_ph = tf.placeholder('float32', [None, None, None, None, 1])

    # separate train-mode and test-mode outputs from the same model
    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)

    #### COST FUNCTION
    #train_cost_sb = tf.reduce_mean((y_ph - y_train_sb)**2)
    train_cost_sb = entropy(y_ph, y_train_sb)

    #test_cost_sb = tf.reduce_mean((y_ph - y_test_sb)**2)
    test_cost_sb = entropy(y_ph, y_test_sb)
    test_accu_sb = accuracy(y_ph, y_test_sb)
    #test_accu_sb = smooth_iou(y_ph, y_test_sb)
    # learning_rate is defined outside this fragment
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(train_cost_sb)

    # model Saver
    saver = tf.train.Saver()

    # cap GPU memory use at 90% of the device
    gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config = tf.ConfigProto(gpu_options = gpu_options)) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)
Exemplo n.º 11
0
def train():
    """Train the MNIST model with a hand-rolled epoch loop and early stopping.

    Builds train/test graphs from model(), optimises cross-entropy with Adam
    (running UPDATE_OPS first so batch-norm statistics update), reports the
    size-weighted mean train/validation cost and validation accuracy per
    epoch, and stops when tg.EarlyStopper reports no further improvement.
    """
    learning_rate = 0.001
    batchsize = 32

    max_epoch = 300
    es = tg.EarlyStopper(max_epoch=max_epoch,
                         epoch_look_back=3,
                         percent_decrease=0)

    seq = model()
    X_train, y_train, X_test, y_test = Mnist(flatten=False,
                                             onehot=True,
                                             binary=True,
                                             datadir='.')
    iter_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
    iter_test = tg.SequentialIterator(X_test, y_test, batchsize=batchsize)

    # 28x28 single-channel images with 10-way one-hot labels
    X_ph = tf.placeholder('float32', [None, 28, 28, 1])
    y_ph = tf.placeholder('float32', [None, 10])

    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)

    train_cost_sb = entropy(y_ph, y_train_sb)
    test_cost_sb = entropy(y_ph, y_test_sb)
    test_accu_sb = accuracy(y_ph, y_test_sb)

    # required for BatchNormalization layer: run the collected update ops
    # (moving averages) before each optimiser step
    optimizer = tf.train.AdamOptimizer(learning_rate)
    update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
    with ops.control_dependencies(update_ops):
        train_ops = optimizer.minimize(train_cost_sb)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        best_valid_accu = 0
        for epoch in range(max_epoch):
            print('epoch:', epoch)
            pbar = tg.ProgressBar(len(iter_train))
            ttl_train_cost = 0
            ttl_examples = 0
            print('..training')
            for X_batch, y_batch in iter_train:
                feed_dict = {X_ph: X_batch, y_ph: y_batch}
                _, train_cost = sess.run([train_ops, train_cost_sb],
                                         feed_dict=feed_dict)
                # weight each batch cost by its size: the last batch may be
                # smaller than batchsize
                ttl_train_cost += len(X_batch) * train_cost
                ttl_examples += len(X_batch)
                pbar.update(ttl_examples)
            mean_train_cost = ttl_train_cost / float(ttl_examples)
            print('\ntrain cost', mean_train_cost)

            ttl_valid_cost = 0
            ttl_valid_accu = 0
            ttl_examples = 0
            pbar = tg.ProgressBar(len(iter_test))
            print('..validating')
            for X_batch, y_batch in iter_test:
                feed_dict = {X_ph: X_batch, y_ph: y_batch}
                valid_cost, valid_accu = sess.run([test_cost_sb, test_accu_sb],
                                                  feed_dict=feed_dict)
                ttl_valid_cost += len(X_batch) * valid_cost
                ttl_valid_accu += len(X_batch) * valid_accu
                ttl_examples += len(X_batch)
                pbar.update(ttl_examples)
            mean_valid_cost = ttl_valid_cost / float(ttl_examples)
            mean_valid_accu = ttl_valid_accu / float(ttl_examples)
            print('\nvalid cost', mean_valid_cost)
            print('valid accu', mean_valid_accu)
            # track the best validation accuracy seen so far
            best_valid_accu = max(best_valid_accu, mean_valid_accu)

            if es.continue_learning(valid_error=mean_valid_cost, epoch=epoch):
                print('epoch', epoch)
                print('best epoch last update:', es.best_epoch_last_update)
                print('best valid last update:', es.best_valid_last_update)
                print('best valid accuracy:', best_valid_accu)
            else:
                print('training done!')
                break
Exemplo n.º 12
0
def train():
    """Train the CIFAR-10 model with a hand-rolled epoch loop and early stopping.

    Builds train/test graphs from model(), optimises cross-entropy with Adam
    (running UPDATE_OPS first so batch-norm statistics update), reports the
    size-weighted mean train/validation cost and validation accuracy per
    epoch, and stops when tg.EarlyStopper reports no further improvement.
    """
    learning_rate = 0.001
    batchsize = 64
    max_epoch = 300
    es = tg.EarlyStopper(max_epoch=max_epoch,
                         epoch_look_back=None,
                         percent_decrease=0)

    X_train, y_train, X_test, y_test = Cifar10(contrast_normalize=False,
                                               whiten=False)
    # infer image geometry and label width from the data itself
    _, h, w, c = X_train.shape
    _, nclass = y_train.shape

    seq = model(nclass=nclass, h=h, w=w, c=c)
    iter_train = tg.SequentialIterator(X_train, y_train, batchsize=batchsize)
    iter_test = tg.SequentialIterator(X_test, y_test, batchsize=batchsize)

    X_ph = tf.placeholder('float32', [None, h, w, c])
    y_ph = tf.placeholder('float32', [None, nclass])

    y_train_sb = seq.train_fprop(X_ph)
    y_test_sb = seq.test_fprop(X_ph)

    train_cost_sb = entropy(y_ph, y_train_sb)
    test_cost_sb = entropy(y_ph, y_test_sb)
    test_accu_sb = accuracy(y_ph, y_test_sb)

    # required for BatchNormalization layer: run the collected update ops
    # (moving averages) before each optimiser step
    optimizer = tf.train.AdamOptimizer(learning_rate)
    update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS)
    with ops.control_dependencies(update_ops):
        train_ops = optimizer.minimize(train_cost_sb)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        init = tf.global_variables_initializer()
        sess.run(init)

        best_valid_accu = 0
        for epoch in range(max_epoch):
            print('epoch:', epoch)
            pbar = tg.ProgressBar(len(iter_train))
            ttl_train_cost = 0
            ttl_examples = 0
            print('..training')
            for X_batch, y_batch in iter_train:
                feed_dict = {X_ph: X_batch, y_ph: y_batch}
                _, train_cost = sess.run([train_ops, train_cost_sb],
                                         feed_dict=feed_dict)
                # weight each batch cost by its size: the last batch may be
                # smaller than batchsize
                ttl_train_cost += len(X_batch) * train_cost
                ttl_examples += len(X_batch)
                pbar.update(ttl_examples)
            mean_train_cost = ttl_train_cost / float(ttl_examples)
            print('\ntrain cost', mean_train_cost)

            ttl_valid_cost = 0
            ttl_valid_accu = 0
            ttl_examples = 0
            pbar = tg.ProgressBar(len(iter_test))
            print('..validating')
            for X_batch, y_batch in iter_test:
                feed_dict = {X_ph: X_batch, y_ph: y_batch}
                valid_cost, valid_accu = sess.run([test_cost_sb, test_accu_sb],
                                                  feed_dict=feed_dict)
                ttl_valid_cost += len(X_batch) * valid_cost
                ttl_valid_accu += len(X_batch) * valid_accu
                ttl_examples += len(X_batch)
                pbar.update(ttl_examples)
            mean_valid_cost = ttl_valid_cost / float(ttl_examples)
            mean_valid_accu = ttl_valid_accu / float(ttl_examples)
            print('\nvalid cost', mean_valid_cost)
            print('valid accu', mean_valid_accu)
            # track the best validation accuracy seen so far
            best_valid_accu = max(best_valid_accu, mean_valid_accu)

            if es.continue_learning(valid_error=mean_valid_cost, epoch=epoch):
                print('epoch', epoch)
                print('best epoch last update:', es.best_epoch_last_update)
                print('best valid last update:', es.best_valid_last_update)
                print('best valid accuracy:', best_valid_accu)
            else:
                print('training done!')
                break