Code Example #1
# Assumes the surrounding project provides: tensorflow as tf, a MNIST helper
# module gm (maybe_download / extract_data / extract_labels), and the network
# module library mod.
train_images_name = "train-images-idx3-ubyte.gz"  #  training set images (9912422 bytes)
train_data_filename = gm.maybe_download(train_images_name)
train_mnist = gm.extract_data(train_data_filename, 60000)

train_label_name = "train-labels-idx1-ubyte.gz"  #  training set labels (28881 bytes)
train_label_filename = gm.maybe_download(train_label_name)
train_mnist_label = gm.extract_labels(train_label_filename, 60000)

test_image_name = "t10k-images-idx3-ubyte.gz"  #  test set images (1648877 bytes)
test_data_filename = gm.maybe_download(test_image_name)
test_mnist = gm.extract_data(test_data_filename, 5000)

test_label_name = "t10k-labels-idx1-ubyte.gz"  #  test set labels (4542 bytes)
test_label_filename = gm.maybe_download(test_label_name)
test_mnist_label = gm.extract_labels(test_label_filename, 5000)

inp = mod.ConstantPlaceholderModule("input", shape=(BATCH_SIZE, 28, 28, 1))
labels = mod.ConstantPlaceholderModule("input_labels", shape=(BATCH_SIZE, 10))
keep_prob = mod.ConstantPlaceholderModule("keep_prob",
                                          shape=(),
                                          dtype=tf.float32)

activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.identity]
filter_shapes = [[8, 8, 1, 6], [8, 8, 6, 16]]
bias_shapes = [[1, 28, 28, 6], [1, 14, 14, 16], [1, 120], [1, 10]]
ksizes = [[1, 4, 4, 1], [1, 4, 4, 1]]
pool_strides = [[1, 2, 2, 1], [1, 2, 2, 1]]
network = recurrentLenet5("rlenet5", activations, filter_shapes, bias_shapes,
                          ksizes, pool_strides, keep_prob.placeholder)

one_time_error = mod.ErrorModule("cross_entropy", cross_entropy)
error = mod.TimeAddModule("add_error")
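The snippet stops before the modules are wired together. Below is a minimal hookup sketch following the add_input / create_output pattern demonstrated in Code Example #3; TIME_DEPTH is an assumed unrolling depth that is not defined in this snippet, and the exact call order should be verified against the modules library.

# Hypothetical wiring, inferred from the pattern in Code Example #3.
network.add_input(inp)              # image batches into the recurrent LeNet
one_time_error.add_input(network)   # per-step cross-entropy between...
one_time_error.add_input(labels)    # ...predictions and labels
error.add_input(one_time_error)     # accumulate the error over time steps
error.create_output(TIME_DEPTH)     # unroll the recurrent graph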
Code Example #2
CLASSES = 10

INP_MIN = -1
INP_MAX = 1
DTYPE = tf.float32

TEST_SUMMARIES = []
TRAIN_SUMMARIES = []
ADDITIONAL_SUMMARIES = []
IMAGE_SUMMARIES = []

inp = m.PlaceholderModule("input",
                          (BATCH_SIZE, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS),
                          dtype=DTYPE)
labels = m.PlaceholderModule("input_labels", (BATCH_SIZE, CLASSES), dtype=DTYPE)
keep_prob = m.ConstantPlaceholderModule("keep_prob", shape=(), dtype=DTYPE)
is_training = m.ConstantPlaceholderModule("is_training",
                                          shape=(),
                                          dtype=tf.bool)

# global_step / global_epoch counters so training can be checkpointed and resumed
global_step = tf.Variable(0, trainable=False, name='global_step')
increment_global_step = tf.assign_add(global_step,
                                      1,
                                      name='increment_global_step')
global_epoch = tf.Variable(0, trainable=False, name='global_epoch')
increment_global_epoch = tf.assign_add(global_epoch,
                                       1,
                                       name='increment_global_epoch')

lrate = tf.Variable(FLAGS.learning_rate, trainable=False, name='learning_rate')
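The increment ops above are meant to be run once per step and once per epoch so that the counters survive a checkpoint restore. A minimal usage sketch with a standard TF1 session and saver; num_epochs, batches_per_epoch, and checkpoint_path are assumed hyperparameters, not part of the original snippet:

# Minimal sketch: bump the counters during training so that a restored
# checkpoint resumes at the right step/epoch.
saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(num_epochs):
        for batch in range(batches_per_epoch):
            # ... run the training op here ...
            sess.run(increment_global_step)
        sess.run(increment_global_epoch)
        saver.save(sess, checkpoint_path, global_step=global_step)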
Code Example #3
delay = 1  # predict one step beyond the input window

# Inputs at time t are paired with targets WINDOW + delay steps later
# (WINDOW is defined earlier in the source file).
trainX = reformdata_train[:-WINDOW - delay]
trainY = reformdata_train[WINDOW + delay:, 0:s_train.shape[1]]
testX = reformdata_test[:-WINDOW - delay]
testY = reformdata_test[WINDOW + delay:, 0:s_test.shape[1]]

CELL_SIZE = 300
TIME_DEPTH = 5
BATCH_SIZE = 1
NFFT = 128
in_size = (NFFT + 1) * WINDOW
out_size = NFFT + 1

inp = mod.ConstantPlaceholderModule("input", shape=(BATCH_SIZE, in_size))
target = mod.ConstantPlaceholderModule("target", shape=(BATCH_SIZE, out_size))
cell = lstm.LSTM_Cell("lstm_cell", in_size, CELL_SIZE)

out_prediction = mod.FullyConnectedLayerModule("out_prediction", tf.identity, CELL_SIZE, out_size)
err = mod.ErrorModule("mse", mean_squared_error)
opt = mod.OptimizerModule("adam", tf.train.AdamOptimizer())

#  Connect the modules: input -> LSTM cell -> prediction, with MSE and Adam on top
cell.add_input(inp)
out_prediction.add_input(cell)
err.add_input(target)
err.add_input(out_prediction)
opt.add_input(err)
opt.create_output(TIME_DEPTH)
out_prediction.create_output(1)
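With the graph assembled, training reduces to feeding window/target pairs through the module placeholders (the .placeholder attribute is shown on these modules in Code Examples #1 and #5). The fetch opt.outputs[TIME_DEPTH] below is a hypothetical accessor for the optimizer module's final op; consult the modules library for the real handle.

# Minimal training sketch over the windowed data built above.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(0, len(trainX) - BATCH_SIZE + 1, BATCH_SIZE):
        feed = {inp.placeholder: trainX[i:i + BATCH_SIZE],
                target.placeholder: trainY[i:i + BATCH_SIZE]}
        sess.run(opt.outputs[TIME_DEPTH], feed_dict=feed)  # hypothetical fetch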
Code Example #4
train_images_name = "train-images-idx3-ubyte.gz"  #  training set images (9912422 bytes)
train_data_filename = gm.maybe_download(train_images_name)
train_mnist = gm.extract_data(train_data_filename, 60000)

train_label_name = "train-labels-idx1-ubyte.gz"  #  training set labels (28881 bytes)
train_label_filename = gm.maybe_download(train_label_name)
train_mnist_label = gm.extract_labels(train_label_filename, 60000)

test_image_name = "t10k-images-idx3-ubyte.gz"  #  test set images (1648877 bytes)
test_data_filename = gm.maybe_download(test_image_name)
test_mnist = gm.extract_data(test_data_filename, 5000)

test_label_name = "t10k-labels-idx1-ubyte.gz"  #  test set labels (4542 bytes)
test_label_filename = gm.maybe_download(test_label_name)
test_mnist_label = gm.extract_labels(test_label_filename, 5000)

inp = mod.ConstantPlaceholderModule("input", shape=(BATCH_SIZE, 28, 28, 1))
labels = mod.ConstantPlaceholderModule("input_labels", shape=(BATCH_SIZE, 10))

activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.identity]
filter_shapes = [[8, 8, 1, 6], [8, 8, 6, 16]]
bias_shapes = [[1, 28, 28, 6], [1, 14, 14, 16], [1, 120], [1, 10]]
ksizes = [[1, 4, 4, 1], [1, 4, 4, 1]]
pool_strides = [[1, 2, 2, 1], [1, 2, 2, 1]]
network = RecurentLenet5("rlenet5", activations, filter_shapes, bias_shapes,
                         ksizes, pool_strides)

one_time_error = mod.ErrorModule("cross_entropy", cross_entropy)
error = mod.TimeAddModule("add_error")
accuracy = mod.BatchAccuracyModule("accuracy")
optimizer = mod.OptimizerModule("adam", tf.train.AdamOptimizer())
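Once extracted, the arrays are usually sliced into batches with plain NumPy indexing. A minimal sketch using the wrap-around offset from the classic TensorFlow MNIST tutorial; num_steps and the loop itself are assumptions, not part of the original snippet:

# Minimal batching sketch over the extracted MNIST arrays.
for step in range(num_steps):
    offset = (step * BATCH_SIZE) % (train_mnist.shape[0] - BATCH_SIZE)
    batch_data = train_mnist[offset:offset + BATCH_SIZE]
    batch_labels = train_mnist_label[offset:offset + BATCH_SIZE]
    # feed batch_data / batch_labels into the network here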
Code Example #5
train_images_name = "train-images-idx3-ubyte.gz"  #  training set images (9912422 bytes)
train_data_filename = gm.maybe_download(train_images_name)
train_mnist = gm.extract_data(train_data_filename, 60000)

train_label_name = "train-labels-idx1-ubyte.gz"  #  training set labels (28881 bytes)
train_label_filename = gm.maybe_download(train_label_name)
train_mnist_label = gm.extract_labels(train_label_filename, 60000)

test_image_name = "t10k-images-idx3-ubyte.gz"  #  test set images (1648877 bytes)
test_data_filename = gm.maybe_download(test_image_name)
test_mnist = gm.extract_data(test_data_filename, 5000)

test_label_name = "t10k-labels-idx1-ubyte.gz"  #  test set labels (4542 bytes)
test_label_filename = gm.maybe_download(test_label_name)
test_mnist_label = gm.extract_labels(test_label_filename, 5000)


inp = mod.ConstantPlaceholderModule("input", shape=(BATCH_SIZE, 28, 28, 1))
labels = mod.ConstantPlaceholderModule("input_labels", shape=(BATCH_SIZE, 10))
is_training = mod.ConstantPlaceholderModule("is_training", shape=(), dtype=tf.bool)


activations = [tf.nn.relu, tf.nn.relu, tf.nn.relu, tf.identity]
filter_shapes = [[8, 8, 1, 6], [8, 8, 6, 16]]
strides = [[1, 1, 1, 1], [1, 1, 1, 1]]
bias_shapes = [[1, 28, 28, 6], [1, 14, 14, 16], [1, 120], [1, 10]]
ksizes = [[1, 4, 4, 1], [1, 4, 4, 1]]
pool_strides = [[1, 2, 2, 1], [1, 2, 2, 1]]
network = Lenet5("lenet5", is_training.placeholder, activations, filter_shapes,
                 strides, bias_shapes, ksizes, pool_strides)

error = mod.ErrorModule("cross_entropy", cross_entropy)
accuracy = mod.BatchAccuracyModule("accuracy")
optimizer = mod.OptimizerModule("adam", tf.train.AdamOptimizer())
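This snippet also ends before the modules are connected. A hypothetical hookup following the add_input pattern from Code Example #3; the exact call order and any create_output arguments should be checked against the modules library:

# Hypothetical wiring, inferred from Code Example #3.
network.add_input(inp)        # images into LeNet-5
error.add_input(network)      # cross-entropy between...
error.add_input(labels)       # ...predictions and labels
accuracy.add_input(network)   # batch accuracy on the same pair
accuracy.add_input(labels)
optimizer.add_input(error)    # Adam minimizes the error
optimizer.create_output(1)    # assumed single time step, mirroring Code Example #3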