Example #1
import tensorflow as tf
import tensorflow.contrib.slim as slim  # TF 1.x contrib API used throughout

# inputs() and simple() are project-local helpers assumed by this example.
def train(train_dir, batch_size, num_batches, log_dir):
    images, labels = inputs(train_dir,
                            batch_size,
                            num_batches,
                            one_hot_labels=True,
                            train=True)

    predictions = simple(images)
    slim.losses.softmax_cross_entropy(predictions, labels)
    total_loss = tf.clip_by_value(slim.losses.get_total_loss(), 1e-10,
                                  1000000.0)
    tf.summary.scalar('loss', total_loss)

    #optimizer = tf.train.RMSPropOptimizer(0.001, 0.9)
    optimizer = tf.train.GradientDescentOptimizer(0.001)
    train_op = slim.learning.create_train_op(total_loss,
                                             optimizer,
                                             summarize_gradients=True)

    slim.learning.train(train_op, log_dir, save_summaries_secs=20)
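
Example #1 assumes two project-local helpers, inputs() and simple(), plus TF-Slim. As a hedged sketch (the layer shape and names are assumptions, not the project's actual code), simple() could be a single fully connected slim layer producing class logits:

import tensorflow as tf
import tensorflow.contrib.slim as slim

def simple(images):
    # Flatten the image batch and map it to 10 class logits; the slim
    # softmax cross-entropy loss above applies the softmax itself.
    net = slim.flatten(images)
    return slim.fully_connected(net, 10, activation_fn=None)
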
Example #2

import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data
data = input_data.read_data_sets("/tmp/data/", one_hot=True)

# model
import model
with tf.variable_scope("simple"):
    x = tf.placeholder("float", [None, 784])
    y, variables = model.simple(x)

# train
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.GradientDescentOptimizer(0.01).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

saver = tf.train.Saver(variables)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for i in range(1000):
        batch_xs, batch_ys = data.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})

    print(sess.run(accuracy, feed_dict={x: data.test.images, y_: data.test.labels}))

    path = saver.save(sess, "data/simple.ckpt")
    print "Saved:", path
Example #3
import os
import tensorflow as tf

from tensorflow.examples.tutorials.mnist import input_data

data = input_data.read_data_sets("/tmp/data/", one_hot=True)

# model
import model
with tf.variable_scope("simple"):
    x = tf.placeholder("float", [None, 784])
    y, variables = model.simple(x)

# train
y_ = tf.placeholder("float", [None, 10])
cross_entropy = -tf.reduce_sum(y_ * tf.log(y))

# Alternative loss (TF 1.x signature):
# cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y))
# Train with the Adam optimizer rather than plain gradient descent.
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

saver = tf.train.Saver(variables)
init = tf.global_variables_initializer()
with tf.Session() as sess:
    # Restore variables from disk.
    saver.restore(sess, "data/simple.ckpt")
    print("Model restored.")
    print(sess.run(accuracy,
                   feed_dict={x: data.test.images, y_: data.test.labels}))
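
Because the Saver restores every variable it was built over, the restored session needs no separate initialization. A hedged variant (the checkpoint check is an addition, not in the original; it reuses saver and init from the example above) that falls back to fresh initialization when no checkpoint exists:

import tensorflow as tf

ckpt = tf.train.get_checkpoint_state("data")
with tf.Session() as sess:
    if ckpt and ckpt.model_checkpoint_path:
        # Reuse previously trained weights.
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        # No checkpoint yet: start from scratch.
        sess.run(init)
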
Example #4
import argparse
import model

parser = argparse.ArgumentParser(description="Run the simple net locally.")

if __name__ == "__main__":
    args = parser.parse_args()

    # Create the network.
    net = model.simple()
    # net = model.multilayer_perceptron()

    # Download MNIST.
    ds = model.load_data()

    epoch = ds.train.epochs_completed
    while True:
        # Compute and apply gradients.
        xs, ys = ds.train.next_batch(100)
        net.sess.run(net.train_step, feed_dict={net.x: xs, net.y: ys})

        if ds.train.epochs_completed != epoch:
            # Evaluate the current model.
            test_xs, test_ys = ds.test.next_batch(ds.test.num_examples)
            accuracy = net.compute_accuracy(test_xs, test_ys)
            print("Epoch {}: accuracy is {}".format(epoch, accuracy))
            epoch = ds.train.epochs_completed
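
Example #4 leans on a wrapper object from model.simple() that bundles a session, placeholders, a train op, and an accuracy helper. A hedged sketch of such a class (attribute names mirror the usage above; the actual project class may differ):

import tensorflow as tf

class SimpleNet(object):
    """Hypothetical wrapper matching the attributes example #4 uses."""

    def __init__(self):
        self.x = tf.placeholder(tf.float32, [None, 784])
        self.y = tf.placeholder(tf.float32, [None, 10])
        W = tf.Variable(tf.zeros([784, 10]))
        b = tf.Variable(tf.zeros([10]))
        logits = tf.matmul(self.x, W) + b
        loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
            labels=self.y, logits=logits))
        self.train_step = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
        correct = tf.equal(tf.argmax(logits, 1), tf.argmax(self.y, 1))
        self._accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())

    def compute_accuracy(self, xs, ys):
        return self.sess.run(self._accuracy,
                             feed_dict={self.x: xs, self.y: ys})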

Example #5
import torch
import torchvision

import model   # project-local network definitions
import Breast  # project-local dataset module used below


class Trainer(object):  # enclosing class name is an assumption; only __init__ was excerpted
    def __init__(self, options, paths):
        """Prepare the network, criterion, optimizer, and data.

        Args:
            options, dict<str, float/int>: Hyperparameters.
            paths, dict<str, str>: Useful paths.
        """
        print('Prepare the network and data.')

        # Configurations.
        self._options = options
        self._paths = paths

        # Network.

        self._net = torch.nn.DataParallel(model.simple()).cuda()
        # self._net.load_state_dict(torch.load(r'H:\LiJiaSen\python_project\simple\src\model\bcnn_all_epoch_34.pth'),
        #                                          strict=False)

        print(self._net)
        self._criterion = torch.nn.CrossEntropyLoss().cuda()

        # Optimizer.
        self._optimizer = torch.optim.SGD(
            self._net.parameters(),
            lr=self._options['base_lr'],
            momentum=0.9,
            weight_decay=self._options['weight_decay'])
        self._scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
            self._optimizer,
            mode='max',
            factor=0.1,
            patience=8,
            verbose=True,
            threshold=1e-4)

        # Data.

        train_transforms = torchvision.transforms.Compose([
            # torchvision.transforms.RandomResizedCrop(size=448,
            #                                             scale=(0.8, 1.0)),
            torchvision.transforms.Resize(size=(224, 224)),
            # torchvision.transforms.RandomCrop(size=448),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.RandomVerticalFlip(),
            torchvision.transforms.ToTensor(),
            # torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
            #                                     std=(0.229, 0.224, 0.225)),
        ])
        test_transforms = torchvision.transforms.Compose([
            # torchvision.transforms.Resize(size=448),
            # torchvision.transforms.CenterCrop(size=448),
            torchvision.transforms.Resize(size=(224, 224)),
            torchvision.transforms.ToTensor(),
            # torchvision.transforms.Normalize(mean=(0.485, 0.456, 0.406),
            #                                  std=(0.229, 0.224, 0.225)),
        ])
        train_data = Breast.BreastCancer(
            root=r'I:\Dataset_crop_224_into_five\400x',
            train=True,
            transform=train_transforms,
            download=True)
        test_data = Breast.BreastCancer(
            root=r'I:\Dataset_crop_224_into_five\400x',
            train=False,
            transform=test_transforms,
            download=True)

        self._train_loader = torch.utils.data.DataLoader(
            train_data,
            batch_size=self._options['batch_size'],
            shuffle=True,
            num_workers=4,
            pin_memory=False)
        self._test_loader = torch.utils.data.DataLoader(test_data,
                                                        batch_size=64,
                                                        shuffle=False,
                                                        num_workers=4,
                                                        pin_memory=False)
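
The __init__ above only prepares state. As a hedged sketch (not from the source class), one training epoch consuming the loader, criterion, optimizer, and scheduler it sets up might look like:

    def _train_epoch(self):
        """Run one pass over the training loader (sketch; not in the source)."""
        self._net.train()
        for images, labels in self._train_loader:
            images, labels = images.cuda(), labels.cuda()
            self._optimizer.zero_grad()
            loss = self._criterion(self._net(images), labels)
            loss.backward()
            self._optimizer.step()
        # ReduceLROnPlateau in 'max' mode expects a quality metric such as
        # validation accuracy; a hypothetical _test_accuracy() helper is
        # assumed here.
        self._scheduler.step(self._test_accuracy())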