def main():
    """Entry point: build and train a hard decision-tree CNN on MNIST.

    Loads MNIST with a 5000-sample validation split, reads the training
    schedule from ``train_program.json`` located next to this script,
    assembles a degree-2 hard-routing classification TreeNetwork whose
    nodes are built by ``baseline_network``, attaches an SGD optimizer
    with biased gradient estimates, then builds the graph, opens a
    session, and runs training.
    """
    mnist = MnistDataSet(validation_sample_count=5000)
    mnist.load_dataset()
    # Resolve the program file relative to this script so the run does not
    # depend on the current working directory.
    program_path = UtilityFuncs.get_absolute_path(
        script_file=__file__, relative_path="train_program.json")
    program = TrainProgram(program_file=program_path)
    network = TreeNetwork(
        run_id=0,
        dataset=mnist,
        parameter_file=None,
        tree_degree=2,
        tree_type=TreeType.hard,
        problem_type=ProblemType.classification,
        train_program=program,
        list_of_node_builder_functions=[baseline_network])
    sgd = SgdOptimizer(network=network, use_biased_gradient_estimates=True)
    network.set_optimizer(optimizer=sgd)
    network.build_network()
    network.init_session()
    network.train()
# --- Sanity check: x_0 and x_1 must be element-wise identical --------------
# NOTE(review): x_0 / x_1 are defined earlier in the file (outside this
# chunk) — presumably two computations of the same tensor being compared;
# verify against the preceding code.
dif = x_0 - x_1
nz = np.flatnonzero(dif)  # indices where the two arrays differ
if len(nz) != 0:
    raise Exception("!!!ERROR!!!")
print("Correct Result.")

# --- Experiment hyperparameters --------------------------------------------
k = 3
D = MnistDataSet.MNIST_SIZE * MnistDataSet.MNIST_SIZE  # flattened image dim
threshold = 0.3
feature_count = 32  # number of conv filters
epsilon = 0.000001
batch_size = 100

# --- Data -------------------------------------------------------------------
dataset = MnistDataSet(validation_sample_count=5000)
dataset.load_dataset()
samples, labels, indices_list = dataset.get_next_batch()
index_list = np.arange(0, batch_size)

# --- Graph inputs (TF1-style placeholders) ----------------------------------
initializer = tf.contrib.layers.xavier_initializer()
x = tf.placeholder(tf.float32, name="x")
indices = tf.placeholder(tf.int64, name="indices")

# Convolution: reshape flat input to NHWC, apply a 5x5 conv with
# `feature_count` filters, SAME padding, stride 1, plus a bias term.
x_image = tf.reshape(
    x, [-1, MnistDataSet.MNIST_SIZE, MnistDataSet.MNIST_SIZE, 1])
C = tf.get_variable(name="C", shape=[5, 5, 1, feature_count],
                    initializer=initializer, dtype=tf.float32)
b_c = tf.get_variable(name="b_c", shape=(feature_count,),
                      initializer=initializer, dtype=tf.float32)
conv_without_bias = tf.nn.conv2d(x_image, C, strides=[1, 1, 1, 1],
                                 padding="SAME")
conv = conv_without_bias + b_c

# Branching: flattened view of the input, used by the (unseen) routing code
# that follows this chunk.
flat_x = tf.reshape(x, [-1, D])