Example #1
import torch
import torch.nn as nn
import torch.optim as optim


def main():
    # Set the hyper-parameters
    learning_rate = 0.001
    batch_size = 100
    epochs = 11

    train_loader, test_loader = MNIST_dataset(batch_size)

    # Build the model on the available device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    lenet = Lenet().to(device)

    # Set the loss function & optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(lenet.parameters(), lr=learning_rate)

    # Train & test the model
    training_loss = train_and_test(device,
                                   epochs,
                                   train_loader,
                                   test_loader,
                                   optimizer,
                                   criterion,
                                   model=lenet)
    plot_loss(training_loss)
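
The snippet relies on a MNIST_dataset helper that is not shown. A minimal sketch of what it might look like, assuming torchvision is available and that Lenet expects 32x32 inputs (both assumptions, not from the original source):

# Hypothetical sketch of the MNIST_dataset helper called above -- an
# assumption, not the original implementation.
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

def MNIST_dataset(batch_size):
    transform = transforms.Compose([
        transforms.Pad(2),   # pad 28x28 MNIST digits to the 32x32 LeNet input
        transforms.ToTensor(),
    ])
    train_set = datasets.MNIST("MNIST_data/", train=True, download=True,
                               transform=transform)
    test_set = datasets.MNIST("MNIST_data/", train=False, download=True,
                              transform=transform)
    train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=batch_size, shuffle=False)
    return train_loader, test_loader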
Example #2
def active_learner(device, seed):
    # Seed every RNG so runs are reproducible
    torch.backends.cudnn.deterministic = True
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    model = Lenet().to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=0.001,
                                 amsgrad=True)
    return model, optimizer, None
def main():
    mnist = input_data.read_data_sets("MNIST_data/", reshape=False)
    X_test, y_test = mnist.test.images, mnist.test.labels

    assert (len(X_test) == len(y_test))

    # Pad images with 0s
    X_test = np.pad(X_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')

    answer = 'y'

    # PLACEHOLDER FOR FEEDING INPUT DATA
    X = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name="X")

    net = Lenet(X)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        saver.restore(sess, cfg.FINAL_PATH)
        while answer != 'n':
            index = np.random.randint(0, len(X_test))

            Z = net.output.eval(
                feed_dict={X: X_test[index].reshape(1, 32, 32, 1)})
            y_pred = np.argmax(Z, axis=1)

            print("All of the classes :", Z)
            print("Predicted class:", y_pred)
            print("Actual class:   ", y_test[index])
            print()
            answer = input("Do you want to predict one more number?(y/n) ")
Example #4
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    sess = tf.Session()
    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE
    lenet = Lenet()
    max_iter = cfg.MAX_ITER

    saver = tf.train.Saver()
    if os.path.exists(parameter_path):
        saver.restore(sess, parameter_path)
    else:
        sess.run(tf.global_variables_initializer())

    for i in range(max_iter):
        batch = mnist.train.next_batch(batch_size)
        if i % 100 == 0:
            train_accuracy = sess.run(lenet.train_accuracy,
                                      feed_dict={
                                          lenet.raw_input_image: batch[0],
                                          lenet.raw_input_label: batch[1]
                                      })
            print("step %d, training accuracy %g" % (i, train_accuracy))
        sess.run(lenet.train_op,
                 feed_dict={
                     lenet.raw_input_image: batch[0],
                     lenet.raw_input_label: batch[1]
                 })
    save_path = saver.save(sess, parameter_path)
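
The os.path.exists check above is fragile for TF1 checkpoints, which are written as separate .index and .data files rather than a single file at parameter_path. A sketch of a more defensive restore, reusing the names from the example (the directory layout is an assumption):

# Hypothetical restore logic -- an assumption, not the original code:
# ask TensorFlow for the latest checkpoint instead of testing a literal path.
ckpt = tf.train.latest_checkpoint(os.path.dirname(parameter_path))
if ckpt is not None:
    saver.restore(sess, ckpt)
else:
    sess.run(tf.global_variables_initializer())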
Example #5
def human_cnn(self):
    human_cnn = Lenet()
    human_cnn.load_state_dict(
        torch.load(
            'utils/cifar10_2class/trained_models/humancnn/lenet_weights_0.5_normalisation',
            map_location=self.device))
    human_cnn.to(self.device)
    return human_cnn
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    test_images = mnist.test.images
    test_labels = mnist.test.labels
    sess = tf.Session()
    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE
    lenet = Lenet()
    max_iter = cfg.MAX_ITER

    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    tf.summary.scalar("loss", lenet.loss)
    summary_op = tf.summary.merge_all()

    train_summary_writer = tf.summary.FileWriter("logs", sess.graph)

    for i in range(max_iter):
        batch = mnist.train.next_batch(batch_size)
        if i % 100 == 0:
            train_accuracy, summary = sess.run(
                [lenet.train_accuracy, summary_op],
                feed_dict={
                    lenet.raw_input_image: batch[0],
                    lenet.raw_input_label: batch[1]
                })
            train_summary_writer.add_summary(summary, i)
            print("step %d, training accuracy %g" % (i, train_accuracy))

        if i % 500 == 0:
            test_accuracy = sess.run(lenet.train_accuracy,
                                     feed_dict={
                                         lenet.raw_input_image: test_images,
                                         lenet.raw_input_label: test_labels
                                     })
            print("\n")
            print("step %d, test accuracy %g" % (i, test_accuracy))
            print("\n")

        sess.run(lenet.train_op,
                 feed_dict={
                     lenet.raw_input_image: batch[0],
                     lenet.raw_input_label: batch[1]
                 })
    saver.save(sess, parameter_path)
    print("saved model")
Example #7
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    sess = tf.Session()
    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE
    lenet = Lenet()
    max_iter = cfg.MAX_ITER

    saver = tf.train.Saver()
    if os.path.exists(parameter_path):
        saver.restore(sess, parameter_path)
    else:
        sess.run(tf.global_variables_initializer())

    temp_step = 10
    result_step = np.arange(0, temp_step * 100, 100)
    result_acc = np.zeros(temp_step)
    result_loss = np.zeros(temp_step)
    result_test = np.zeros(temp_step)
    for i in range(temp_step * 100):
        batch = mnist.train.next_batch(batch_size)
        if i % 100 == 0:
            # i // 100 keeps the index aligned with result_step; the old
            # int(i / 100 - 1) sent the i == 0 measurement to index -1.
            r = i // 100
            result_acc[r] = sess.run(lenet.train_accuracy, feed_dict={
                lenet.raw_input_image: batch[0],
                lenet.raw_input_label: batch[1]
            })
            result_loss[r] = sess.run(lenet.loss, feed_dict={
                lenet.raw_input_image: batch[0],
                lenet.raw_input_label: batch[1]
            })
            print("step %d, training accuracy %g, training loss %g" %
                  (i, result_acc[r], result_loss[r]))
            result_test[r] = sess.run(lenet.train_accuracy, feed_dict={
                lenet.raw_input_image: mnist.test.images,
                lenet.raw_input_label: mnist.test.labels
            })
            print("test accuracy %g" % result_test[r])

        sess.run(lenet.train_op,
                 feed_dict={lenet.raw_input_image: batch[0],
                            lenet.raw_input_label: batch[1]})
    save_path = saver.save(sess, parameter_path)
    plt.plot(result_step, result_acc, label='training accuracy')
    plt.plot(result_step, result_test, label='test accuracy')
    plt.title('LeNet')
    plt.xlabel('step')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()
Example #8
def main():

    image = Image.open("./img/prototype3.tiff")
    image = np.array(image)

    # PLACEHOLDERS FOR FEEDING INPUT DATA
    X = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name="X")

    net = Lenet(X)

    saver = tf.train.Saver()

    with tf.Session() as sess:
        saver.restore(sess, cfg.FINAL_PATH)
        Z = net.output.eval(feed_dict={X: image.reshape(1, 32, 32, 1)})
        y_pred = np.argmax(Z, axis=1)

        print("tutte le classi:", Z)
        print("Predicted class:", y_pred)
Example #9
def main():
    # Copy the training data from OBS to ModelArts
    mox.file.copy_parallel(src_url="obs://canncamps-hw38939615/MNIST_data/",
                           dst_url="MNIST_data")
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    config = tf.ConfigProto()
    custom_op = config.graph_options.rewrite_options.custom_optimizers.add()
    custom_op.name = "NpuOptimizer"
    config.graph_options.rewrite_options.remapping = RewriterConfig.OFF  # remapping must be disabled explicitly
    sess = tf.Session(config=config)

    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE
    lenet = Lenet()
    max_iter = cfg.MAX_ITER

    saver = tf.train.Saver()
    if os.path.exists(parameter_path):
        saver.restore(sess, parameter_path)
    else:
        sess.run(tf.global_variables_initializer())

    for i in range(max_iter):
        batch = mnist.train.next_batch(batch_size)
        if i % 100 == 0:
            train_accuracy, train_loss = sess.run(
                [lenet.train_accuracy, lenet.loss],
                feed_dict={
                    lenet.raw_input_image: batch[0],
                    lenet.raw_input_label: batch[1]
                })
            print("step %d, training accuracy %g, loss is %g" %
                  (i, train_accuracy, train_loss))
        sess.run(lenet.train_op,
                 feed_dict={
                     lenet.raw_input_image: batch[0],
                     lenet.raw_input_label: batch[1]
                 })
    save_path = saver.save(sess, parameter_path)
    print("save model in {}".format(save_path))
    # Copy the trained weights back to OBS
    mox.file.copy_parallel(src_url="checkpoint/",
                           dst_url="obs://canncamps-hw38939615/ckpt")
def main():

    for i in range(10):
        # i = 3
        tf.reset_default_graph()

        X = tf.Variable(tf.random_uniform(shape=(1, 32, 32, 1),
                                          minval=0,
                                          maxval=0.5),
                        name="X")

        net = Lenet(X, is_trainable=False)

        with tf.name_scope("am"):
            am = (tf.log(net.output[:, i]) -
                  0.5 * tf.norm(X, ord=2, name="l2-norm"))

        with tf.name_scope("optimize-am"):
            optimizer = tf.train.AdamOptimizer(learning_rate=cfg.LEARNING_RATE)
            training_op = optimizer.minimize(
                -am)  # minimizing -cost is equivalent to maximizing cost

        saver = tf.train.Saver(
            tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="LeNet"))

        with tf.Session() as sess:
            tf.global_variables_initializer().run()
            saver.restore(sess, cfg.FINAL_PATH)
            print("Optimizing...")
            print()
            for epoch in range(5000):
                sess.run(training_op)
                print("Epoch:", epoch, " Loss:", am.eval())
                print(' Classes Probability:', net.output.eval())

            # (X - tf.reduce_min(X)) / (tf.reduce_max(X) - tf.reduce_min(X))
            # could rescale the prototype to [0, 1] before saving.
            img = sess.run(tf.squeeze(X))
            img = Image.fromarray(img, "F")
            img.save("./img/prototype{}.tiff".format(i))

            print("Prototype saved as image")
def test(self):
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    x_train = np.array(x_train) / 255
    x_test = np.array(x_test) / 255

    y_train = keras.utils.to_categorical(y_train)
    y_test = keras.utils.to_categorical(y_test)

    lenet = Lenet()
    lenet.Fit(x_train, y_train, 10, 128)
    score = lenet.Evaluate(x_test, y_test)

    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    y_pred = lenet.Predict(x_test)

    y_test = np.argmax(y_test, axis=1)
    y_pred = np.argmax(y_pred, axis=1)

    print()
    print(lenet.ConfusionMatrix(y_test, y_pred))
Example #12
# (The excerpt begins mid-conditional; the if-branch presumably performed
# the standard channels-first reshape.)
if K.image_data_format() == "channels_first":
    trainXX = trainXX.reshape(trainXX.shape[0], 1, 28, 28)
else:
    trainXX = trainXX.reshape(trainXX.shape[0], 28, 28, 1)

# data = dataset.data.astype("float") / 255.0

(trainX, testX, trainY, testY) = train_test_split(trainXX, trainYY,
                                                  test_size=0.25)

lb = LabelBinarizer()
trainY = lb.fit_transform(trainY)
# Use transform (not fit_transform) so test labels reuse the training binarizer
testY = lb.transform(testY)

model = Lenet.load(width=28, height=28, depth=1, classes=10)

# model.add(Dense(256, input_shape=(784,), activation="sigmoid"))
# model.add(Dense(128, activation="sigmoid"))
# model.add(Dense(10, activation="softmax"))

print("[INFO] training network...")
sgd = SGD(0.01)
model.compile(loss="categorical_crossentropy", optimizer=sgd,
              metrics=["accuracy"])
H = model.fit(trainX, trainY, validation_data=(testX, testY),
              epochs=20, batch_size=128)

print("[INFO] evaluating network...")
predictions = model.predict(testX, batch_size=128)
print(classification_report(testY.argmax(axis=1),
                            predictions.argmax(axis=1),
                            target_names=[str(x) for x in lb.classes_]))
Example #13
"""
将保存了训练时伪量化信息的checkpoint文件转换成freeze pb文件
"""
import tensorflow as tf
import config as cfg

from lenet import Lenet
from tensorflow.python.framework import graph_util

# os.environ['CUDA_VISIBLE_DEVICES'] = '1'

with tf.Session() as sess:
    le_net = Lenet(False)
    # Do not import the training graph here; instead build a fresh graph
    # and fill it with the parameters from the training graph.
    saver = tf.train.Saver()
    saver.restore(sess, cfg.PARAMETER_FILE)

    frozen_graph_def = graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ['predictions'])
    tf.io.write_graph(frozen_graph_def,
                      "pb_model",
                      "freeze_eval_graph.pb",
                      as_text=False)
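
A minimal sketch of consuming the frozen graph written above (an assumption, not part of the original source); the tensor name is derived from the 'predictions' node passed to convert_variables_to_constants:

# Hypothetical usage sketch: load freeze_eval_graph.pb and look up the
# frozen output tensor. The ":0" suffix is the usual first-output naming.
with tf.io.gfile.GFile("pb_model/freeze_eval_graph.pb", "rb") as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name="")
    predictions = graph.get_tensor_by_name("predictions:0")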
Example #14
def __init__(self):
    self.lenet = Lenet()
    self.sess = tf.Session()
    self.parameter_path = cfg.PARAMETER_FILE
    self.saver = tf.train.Saver()
Example #15
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
x_train, y_train = mnist.train.images, mnist.train.labels
x_val, y_val = mnist.validation.images, mnist.validation.labels
x_test, y_test = mnist.test.images, mnist.test.labels

# Training

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement
    )
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        lenet = Lenet(dropout_keep_prob=FLAGS.dropout_keep_prob,
                      learning_rate=FLAGS.learning_rate,
                      l2_reg_lambda=FLAGS.l2_reg_lambda)

        saver = tf.train.Saver(tf.global_variables())

        if os.path.exists(FLAGS.parameter_file):
            saver.restore(sess, FLAGS.parameter_file)
        else:
            sess.run(tf.global_variables_initializer())

        for i in range(FLAGS.epoch):
            batch = mnist.train.next_batch(FLAGS.batch_size)
            if i % 500 == 0:
                dev_accuracy = sess.run(lenet.train_accuracy, feed_dict={
                    lenet.raw_input_image: x_val,
                    lenet.raw_input_label: y_val
                })
                print("step %d, validation accuracy %g" % (i, dev_accuracy))
            sess.run(lenet.train_op, feed_dict={
                lenet.raw_input_image: batch[0],
                lenet.raw_input_label: batch[1]
            })
Example #16

def transform_img(image, is_plotted=False):
    # Note: .numpy() below assumes eager execution is enabled.
    resized = tf.image.resize_images(image, (28, 28),
                                     tf.image.ResizeMethod.AREA)
    resized = resized.numpy().reshape((28, 28))
    if is_plotted:
        import matplotlib.pyplot as plt
        plt.imshow(resized, cmap='gray')
        plt.show()
    resized = resized / 255
    resized = resized.reshape((1, 28, 28, 1))
    return resized


lenet = Lenet(20, 64, tf.train.AdamOptimizer(learning_rate=0.001),
              tf.losses.softmax_cross_entropy)
lenet.load_model()


# mouse callback function
def draw_circle(event, x, y, _, __):
    global start_point, end_point, drawing

    if event == cv2.EVENT_LBUTTONDOWN:
        drawing = True
        start_point = (x, y)

    elif event == cv2.EVENT_MOUSEMOVE:
        if drawing:
            end_point = (x, y)
            cv2.line(img, start_point, end_point, 255, 20)

    elif event == cv2.EVENT_LBUTTONUP:
        # the original excerpt ends above; releasing the button
        # presumably finishes the stroke
        drawing = False
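
The excerpt stops before the drawing canvas and event loop are created. A hypothetical driver, assuming eager execution is enabled and that the Lenet wrapper above exposes a predict-style method (both assumptions, not from the original):

# Hypothetical driver sketch (not in the original excerpt): a blank canvas,
# the callback registered above, and prediction on the 'p' key.
import cv2
import numpy as np

img = np.zeros((280, 280), dtype=np.uint8)
drawing, start_point, end_point = False, None, None

cv2.namedWindow("draw")
cv2.setMouseCallback("draw", draw_circle)
while True:
    cv2.imshow("draw", img)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('p'):
        digit = transform_img(img[..., None].astype(np.float32))
        # The wrapper's prediction API is an assumption:
        # print(lenet.predict(digit))
    elif key == ord('q'):
        break
cv2.destroyAllWindows()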
Example #17
def main():
    mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
    test_images = mnist.test.images
    test_labels = mnist.test.labels
    sess = tf.Session()
    batch_size = cfg.BATCH_SIZE
    parameter_path = cfg.PARAMETER_FILE

    lenet = Lenet()

    max_iter = cfg.MAX_ITER

    variables = get_variables_to_restore()
    save_vars = [
        variable for variable in variables
        if not re.search("Adam", variable.name)
    ]

    saver = tf.train.Saver(save_vars)

    sess.run(tf.global_variables_initializer())

    tf.summary.scalar("loss", lenet.loss)
    summary_op = tf.summary.merge_all()

    train_summary_writer = tf.summary.FileWriter("logs", sess.graph)

    for i in range(max_iter):
        batch = mnist.train.next_batch(batch_size)
        if i % 100 == 0:
            train_accuracy, summary = sess.run(
                [lenet.train_accuracy, summary_op],
                feed_dict={
                    lenet.raw_input_image: batch[0],
                    lenet.raw_input_label: batch[1]
                })
            train_summary_writer.add_summary(summary, i)
            print("step %d, training accuracy %g" % (i, train_accuracy))

        if i % 500 == 0:
            test_accuracy = sess.run(lenet.train_accuracy,
                                     feed_dict={
                                         lenet.raw_input_image: test_images,
                                         lenet.raw_input_label: test_labels
                                     })
            print("\n")
            print("step %d, test accuracy %g" % (i, test_accuracy))
            print("\n")

        sess.run(lenet.train_op,
                 feed_dict={
                     lenet.raw_input_image: batch[0],
                     lenet.raw_input_label: batch[1]
                 })
    saver.save(sess, parameter_path)
    print("saved model")

    # Export the checkpoint to a SavedModel
    builder = tf.saved_model.builder.SavedModelBuilder("pb_model")
    inputs = {
        "inputs": tf.saved_model.utils.build_tensor_info(lenet.raw_input_image)
    }

    outputs = {
        "predictions":
        tf.saved_model.utils.build_tensor_info(lenet.predictions)
    }

    prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
        inputs=inputs,
        outputs=outputs,
        method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)
    legacy_init_op = tf.group(tf.tables_initializer(), name="legacy_init_op")
    builder.add_meta_graph_and_variables(
        sess, [tf.saved_model.tag_constants.SERVING],
        signature_def_map={"serving_default": prediction_signature},
        legacy_init_op=legacy_init_op,
        saver=saver)

    builder.save()
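
A minimal sketch of loading the exported SavedModel back and running its serving signature (an assumption for illustration, not part of the original source):

# Hypothetical usage sketch: reload the SavedModel written above and run
# the "serving_default" signature on the test images.
with tf.Session(graph=tf.Graph()) as load_sess:
    meta_graph = tf.saved_model.loader.load(
        load_sess, [tf.saved_model.tag_constants.SERVING], "pb_model")
    signature = meta_graph.signature_def["serving_default"]
    input_name = signature.inputs["inputs"].name
    output_name = signature.outputs["predictions"].name
    preds = load_sess.run(output_name, feed_dict={input_name: test_images})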
Example #18
X_test = X_test[:, :, :, None]
train_data = (X_train, y_train)
test_data = (X_test, y_test)
train_data = (train_data[0],
              keras.utils.to_categorical(train_data[1],
                                         num_classes=CLASS_NUM))
test_data = (test_data[0],
             keras.utils.to_categorical(test_data[1],
                                        num_classes=CLASS_NUM))
# get GAN discriminator score for training set
d_pret = d.predict(X_train, verbose=1)
d_pret = d_pret.reshape((60000, ))
#d_pret = np.sort(d_pret)
disc_scores = preprocessing.scale(d_pret) + 0.5
# Create a classifier
classifier_daal = Lenet(session, scope='daal_lenet')
# Repeat the experiment several times with different random seeds
daal_all = []
for iteration in range(ACTIVE_LEARNING_ITERS):
    print("Active Learning Iteration:", iteration)
    daal_accuracies = []
    # Shuffle the training data (copies; shared indices would avoid the copy)
    images, targets = train_data[0].copy(), train_data[1].copy()
    scores = disc_scores[:]
    assert len(scores) == train_data[0].shape[0]
    rng_state = np.random.get_state()
    np.random.shuffle(images)
    np.random.set_state(rng_state)
    np.random.shuffle(targets)
    np.random.set_state(rng_state)
Example #19
def main():

    now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
    root_logdir = "tf_logs"
    logdir = "{}/run-{}/".format(root_logdir, now)

    mnist = input_data.read_data_sets("MNIST_data/", reshape=False)
    X_train, y_train = mnist.train.images, mnist.train.labels
    X_validation, y_validation = mnist.validation.images, mnist.validation.labels
    X_test, y_test = mnist.test.images, mnist.test.labels

    assert (len(X_train) == len(y_train))
    assert (len(X_validation) == len(y_validation))
    assert (len(X_test) == len(y_test))

    print()
    print("Image Shape: {}".format(X_train[0].shape))
    print()
    print("Training Set:   {} samples".format(len(X_train)))
    print("Validation Set: {} samples".format(len(X_validation)))
    print("Test Set:       {} samples".format(len(X_test)))

    # The MNIST data that TensorFlow pre-loads comes as 28x28x1 images, but
    # the LeNet architecture only accepts 32x32xC images, where C is the
    # number of color channels. Pad the images with zeros.
    X_train = np.pad(X_train, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')
    X_validation = np.pad(X_validation, ((0, 0), (2, 2), (2, 2), (0, 0)),
                          'constant')
    X_test = np.pad(X_test, ((0, 0), (2, 2), (2, 2), (0, 0)), 'constant')

    print("Updated Image Shape: {}".format(X_train[0].shape))

    # Shuffle the training data
    X_train, y_train = shuffle(X_train, y_train)

    X = tf.placeholder(tf.float32, shape=(None, 32, 32, 1), name="X")
    y = tf.placeholder(tf.int32, shape=(None,), name="y")
    one_hot_y = tf.one_hot(y, 10)

    net = Lenet(X, is_trainable=True)
    with tf.name_scope("loss"):
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            labels=one_hot_y, logits=net.output)
        loss_operation = tf.reduce_mean(cross_entropy, name="loss")

    with tf.name_scope("train"):
        optimizer = tf.train.AdamOptimizer(learning_rate=cfg.LEARNING_RATE)
        training_op = optimizer.minimize(loss_operation)

    with tf.name_scope("eval"):
        correct = tf.equal(tf.argmax(net.output, 1), tf.argmax(one_hot_y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))

    init = tf.global_variables_initializer()

    saver = tf.train.Saver()

    cross_entropy_summary = tf.summary.scalar('cross_entropy', loss_operation)
    acc_train_summary = tf.summary.scalar('training_accuracy', accuracy)
    acc_val_summary = tf.summary.scalar('validation_accuracy', accuracy)

    file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())

    num_examples = len(X_train)
    num_batches = num_examples // cfg.BATCH_SIZE

    with tf.Session() as sess:
        init.run()
        print("Training...")
        print()
        for epoch in range(cfg.EPOCHS):
            X_train, y_train = shuffle(X_train, y_train)
            batch_index = 0
            for offset in range(0, num_examples, cfg.BATCH_SIZE):
                end = offset + cfg.BATCH_SIZE
                X_batch, y_batch = X_train[offset:end], y_train[offset:end]

                if batch_index % 10 == 0:
                    cross_entropy_str = cross_entropy_summary.eval(feed_dict={
                        X: X_batch,
                        y: y_batch
                    })
                    step = epoch * num_batches + batch_index
                    file_writer.add_summary(cross_entropy_str, step)

                sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
                batch_index += 1

            acc_train_str = acc_train_summary.eval(feed_dict={
                X: X_train,
                y: y_train
            })
            acc_val_str = acc_val_summary.eval(feed_dict={
                X: X_validation,
                y: y_validation
            })
            file_writer.add_summary(acc_train_str, epoch)
            file_writer.add_summary(acc_val_str, epoch)
            print("Epoch:", epoch)
            save_path = saver.save(sess, cfg.INTERMEDIATE_PATH)

        save_path = saver.save(sess, cfg.FINAL_PATH)
        print("Model Saved")

    file_writer.close()

    with tf.Session() as sess:
        saver.restore(sess, save_path)

        acc_test = accuracy.eval(feed_dict={X: X_test, y: y_test})
        print("Test Accuracy = {:.3f}".format(acc_test))
Example #20
    #saver = tf.train.import_meta_graph(MODEL_PATH+'/model-100.meta')
    print(MODEL_PATH)
    with tf.Session(graph=new_graph) as session:
        # Create and restore the pre-trained GAN
        real_data = tf.placeholder(tf.float32, shape=[128, OUTPUT_DIM])
        disc_real = Discriminator(real_data)
        var_list = [v for v in tf.global_variables()
                    if "Discriminator" in v.name]
        saver = tf.train.Saver(var_list)
        saver.restore(session, tf.train.latest_checkpoint(MODEL_PATH))
        # get GAN discriminator score for training set
        train_data, dev_data, test_data = get_mnist()
        disc_scores = run_all(session, disc_real, real_data, train_data[0])
        disc_scores -= min(disc_scores)
        disc_scores /= max(disc_scores)
        # Create the classifiers
        classifier_entropy = Lenet(None, scope='entropy_lenet')
        classifier_daal = Lenet(None, scope='daal_lenet')
        var_list = [v for v in tf.global_variables() if "target" in v.name]
        print("*************")
        print(var_list)
        var_list = [v for v in tf.global_variables() if "lenet" in v.name]
        print("*************")
        for v in var_list:
            print(v)
        train_data = (train_data[0],
                      keras.utils.to_categorical(train_data[1],
                                                 num_classes=CLASS_NUM))
        test_data = (test_data[0],
                     keras.utils.to_categorical(test_data[1],
                                                num_classes=CLASS_NUM))
        # Repeat the experiment several times with different random seeds
        daal_all = []
        entropy_all = []
        for iteration in range(ACTIVE_LEARNING_ITERS):
            print("Active Learning Iteration:", iteration)
    image_datasets = {
        x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
        for x in ['train', 'val']
    }
    dataloaders = {
        x: torch.utils.data.DataLoader(image_datasets[x],
                                       batch_size=20,
                                       shuffle=True,
                                       num_workers=4)
        for x in ['train', 'val']
    }
    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'val']}
    class_names = image_datasets['train'].classes
    print("class_names", class_names)

    model_ft = Lenet()
    model_ft.conv1 = nn.Conv2d(3, 6, 3)  # color inputs have 3 channels
    num_ftrs = model_ft.fc3.in_features
    # Replace the final fully-connected layer so the output size matches
    # len(class_names).
    model_ft.fc3 = nn.Linear(num_ftrs, len(class_names))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs
        model_ft = nn.DataParallel(model_ft)

    model_ft = model_ft.to(device)

    criterion = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.datasets.mnist import read_data_sets
from lenet import Lenet

mnist = read_data_sets("mnist_data/", one_hot=True)

batch_size = 100
lenet_part = Lenet(mu=0, sigma=0.3, learning_rate=0.001)
merged = lenet_part.merged_summary
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('./log/train', sess.graph)
    test_writer = tf.summary.FileWriter('./log/test')

    for i in range(3000):
        batch = mnist.train.next_batch(batch_size)
        _, train_acc, train_sum = sess.run(
            [lenet_part.training_step, lenet_part.accuracy, merged],
            feed_dict={lenet_part.raw_input_image: batch[0],
                       lenet_part.raw_input_label: batch[1]})

        if i % 10 == 0:
            # Evaluate on the test set only when logging, instead of
            # running a full test-set pass on every iteration.
            test_acc, test_sum = sess.run(
                [lenet_part.accuracy, merged],
                feed_dict={lenet_part.raw_input_image: mnist.test.images,
                           lenet_part.raw_input_label: mnist.test.labels})
            test_writer.add_summary(test_sum, i)
            train_writer.add_summary(train_sum, i)
            print('[train_acc, test_acc]:', train_acc, test_acc)
    train_writer.close()
    test_writer.close()