Example #1
    def run(self):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = Imagenet.Cifar()
            train_data, train_label, test_data, test_label = dataset.getdata()
            train_label = de_onehot(train_label)
            test_label = de_onehot(test_label)

            path = "Equal/32-64-128-256-375-08-05"
Example #2
def run(epochs):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()

        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        #path = "Networkfile/" + "convAENN_noise"
        path = "Networkfile/convAENN" + filetime
        saver = NNutils.save(path, sess)
        writer, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:batch_size]

        st_time = datetime.now()

        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size),
                                  range(batch_size, len(trX), batch_size)):
                summary, _, loss_nn, loss_ae, learning_rate, step = sess.run(
                    [merged, trainop, cost_NN, cost_AE, lr, global_step],
                    feed_dict={
                        X: trX[start:end],
                        Y: trY[start:end],
                        dropout_conv: 0.5,
                        dropout_fc: 0.5
                    })
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), loss_nn, loss_ae,
                          learning_rate)

            loss_nn, loss_ae, accuracy = sess.run(
                [cost_NN, cost_AE, acc_op],
                feed_dict={
                    X: teX[test_indices],
                    Y: teY[test_indices],
                    dropout_conv: 1.0,
                    dropout_fc: 1.0
                })
            print("test results : ", accuracy, loss_nn, loss_ae)
            saver.save(sess, path + "/model.ckpt", step)

            # im = im.astype('uint8')
            # im = Image.fromarray(im[0])
            # im.save('convAENN.jpg')

        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)
Example #3
def run(epochs):
    with tf.Session() as sess:

        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()
        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        #path = "Networkfile/convAE_sep" + "2017_03_27_20_32"
        path = "Networkfile/convAENN_sep" + filetime
        saver = NNutils.save(path, sess)
        writer, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:batch_size]

        #run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        #run_metadata = tf.RunMetadata()

        st_time = datetime.now()
        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size),
                                  range(batch_size, len(trX), batch_size)):
                summary, _, _, cost_ae, cost_nn, step = sess.run(
                    [
                        merged, trainop_unsuper, trainop_super, cost_unsuper,
                        cost_super, global_step
                    ],
                    feed_dict={
                        x: trX[start:end],
                        y: trY[start:end],
                        dropout_conv: 1.0,
                        dropout_fc: 0.8
                    })
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), cost_nn, cost_ae)

            accuracy, cost_nn, cost_ae = sess.run(
                [acc_op, cost_super, cost_unsuper],
                feed_dict={
                    x: teX[test_indices],
                    y: teY[test_indices],
                    dropout_conv: 1.0,
                    dropout_fc: 1.0
                })

            saver.save(sess, path + "/model.ckpt", step)
            print("test results : ", accuracy, cost_nn, cost_ae)

            # _, loss_super, step = sess.run([trainop_super, cost_super, global_step], feed_dict={y: trY[start:end]
            #                                                                  ,dropout_conv: 0.8, dropout_fc : 0.6})

        #tf.graph_util.convert_variables_to_constants(sess, )
        # if step % 50 == 0:
        #     writer.add_summary(summary, step)
        #     print(step, loss_super, loss_unsuper)

        #print(np.shape(trX))
        #summary, accuracy, loss = sess.run([merged, acc_op, cost], feed_dict={ X: teX[test_indices], Y: teY[test_indices]})
        #print(step, datetime.now(), loss_unsuper, loss_super, learning_rate)

        # loss_su, loss_un, accuracy, step = sess.run([cost_super, cost_unsuper, acc_op, global_step], feed_dict={X: teX[test_indices], Y: teY[test_indices],
        #                                                           dropout_conv : 1.0, dropout_fc : 1.0})
        # print("test results : ", accuracy, loss_super, loss_unsuper)
        # saver.save(sess, path + "/model.ckpt", step)

        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)
Example #4
with tf.name_scope("accuracy"):  # compute accuracy with TF ops so it can be viewed in TensorBoard
    predict_op = tf.equal(tf.argmax(Y, 1), tf.argmax(py_x, 1))
    acc_op = tf.reduce_mean(tf.cast(predict_op, "float"))
    tf.summary.scalar("accuracy", acc_op)

# network checkpointing
#saver = tf.train.Saver()
#global_step = tf.Variable(0, name='global_step', trainable=False)
#ckpt_dir = "./Networkfile/conv"
#if not os.path.exists(ckpt_dir):
#    os.makedirs(ckpt_dir)

# ImageNet
imagenet = Imagenet.Imagenet()
train_num, test_num = imagenet.getnum()

with tf.Session() as sess:
    # initial setup
    #tf.initialize_all_variables().run()
    tf.global_variables_initializer().run()
    test_indices = np.arange(test_num)
    np.random.shuffle(test_indices)
    test_indices = test_indices[0:batch_size]

    # summary/graph logs: remove stale logs from previous runs
    graphPath = "./logs/conv_logs"
    if os.path.exists(graphPath):
        shutil.rmtree(graphPath)
Example #5
def run(epochs):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()
        train_gray, test_gray = dataset.getgray()
        #train_gray = train_gray.reshape(-1, 32, 32, 1)
        #test_gray = test_gray.reshape(-1, 32, 32, 1)

        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M")
        path = "ConvAE/" + "rgb2gray"
        #path = "Networkfile/convAE" + filetime
        saver = NNutils.save(path, sess)
        writer, writer_test, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        # must stay within batch_size: the deconv output shape is tied to it
        # (see the sketch after this example)
        test_indices = test_indices[0:batch_size]

        st_time = datetime.now()

        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)):
                summary, _, loss, learning_rate, step = sess.run(
                    [merged, trainop, cost,
                     # trainop_nn, cost_nn,
                     lr, global_step],
                    feed_dict={
                        X: trX[start:end],
                        # Y: trY[start:end],
                        x_gray: train_gray[start:end],
                        dropout_conv: 0.8,
                        dropout_fc: 1.0
                    })

                #print(loss)
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), loss, learning_rate)


            loss, arr = sess.run(
                [cost, X_],
                feed_dict={
                    X: teX,
                    # Y: teY[0:batch_size],
                    x_gray: test_gray,
                    dropout_conv: 1.0,
                    dropout_fc: 1.0
                })

            # arr_uint = arr.astype('uint8')
            # #arr_uint = arr_uint.reshape(-1, 32 * 32 * 1)
            # im = Image.fromarray(arr_uint[0])
            # im.show()

            print("test results : ", loss)
            saver.save(sess, path + "/model.ckpt", step)


        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)

        # evaluation: train a linear SVM on the encoder features
        x_train = sess.run(Z, feed_dict={X: trX,
                                         x_gray: train_gray,  # was test_gray, whose length mismatches trX
                                         dropout_conv: 1.0,
                                         dropout_fc: 1.0})
        x_train = x_train.reshape(len(x_train), -1)
        y_train = np.argmax(trY, 1)

        print(x_train.shape)



        # compute the SVM's prediction accuracy on the test-set features
        x_test = sess.run(Z, feed_dict={X: teX,
                                        x_gray: test_gray,
                                        dropout_conv: 1.0,
                                        dropout_fc: 1.0})
        x_test = x_test.reshape(len(x_test), -1)
        y_test = np.argmax(teY, 1)

        # average accuracy over several independent SVM fits
        # (LinearSVC results vary from run to run without a fixed random_state)
        accuracy = 0
        iteration = 50
        for i in range(iteration):
            print(i)
            clf = svm.LinearSVC(max_iter=200)
            clf.fit(x_train, y_train)
            acc = clf.score(x_test, y_test)
            print(acc)
            accuracy += acc

        accuracy /= iteration
        print(accuracy)
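
The test_indices comment in this example points at a common TF1 pitfall: if the decoder builds tf.nn.conv2d_transpose with the batch dimension hard-coded in output_shape, any feed whose first dimension differs from batch_size fails at run time. A minimal sketch, assuming the decoder is built that way (the shapes and names below are hypothetical, not taken from the network above):

import tensorflow as tf  # TF 1.x

x = tf.placeholder(tf.float32, [None, 16, 16, 64])
w = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
# Hard-coding the batch dimension (here 128) ties the op to that batch size:
deconv_fixed = tf.nn.conv2d_transpose(
    x, w, output_shape=[128, 32, 32, 32], strides=[1, 2, 2, 1])
# Deriving it from the input at run time lifts the restriction:
deconv_dynamic = tf.nn.conv2d_transpose(
    x, w, output_shape=tf.stack([tf.shape(x)[0], 32, 32, 32]),
    strides=[1, 2, 2, 1])
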
Example #6
with tf.name_scope("cost"):
    nn_Y = NN(X)

    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=nn_Y, labels=Y))
    train_op = tf.train.AdamOptimizer(0.001).minimize(cost, global_step)
    tf.summary.scalar("cost", cost)

predict_op = tf.equal(tf.argmax(Y, 1), tf.argmax(nn_Y, 1))
acc_op = tf.reduce_mean(tf.cast(predict_op, "float"))

# data loading
# mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
# trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
dataset = Imagenet.Cifar()
trX, trY, teX, teY = dataset.getdata()
trX = trX.reshape(-1, 32 * 32 * 3)
teX = teX.reshape(-1, 32 * 32 * 3)

print(cost.shape)  # static shape; tf.shape(cost) would print a symbolic Tensor, not values

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    batch_size = 128
    st_time = datetime.now()
    print(st_time)

    savepath = "Networkfile/NN"
    saver = NNutils.save(savepath, sess)
    writer, merge = NNutils.graph("Networkfile/NN", sess)
Example #7
def run(epochs):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()


        print(teY.shape)

        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M")
        path = "convAE/" + "rgb2rgb"
        #path = "Networkfile/convAE" + filetime
        saver = NNutils.save(path, sess)
        writer, writer_test, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:batch_size]

        st_time = datetime.now()

        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)):
                summary, _, loss, learning_rate, step = sess.run(
                    [merged, trainop, cost, lr, global_step],
                    feed_dict={
                        X: trX[start:end],
                        dropout_conv: 0.8,
                        dropout_fc: 0.5
                    })
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), loss, learning_rate)


            loss, results = sess.run([cost, Z], feed_dict={X: teX,
                                                           dropout_conv: 1.0,
                                                           dropout_fc: 1.0,
                                                           })
            print("test results : ", loss)
            saver.save(sess, path + "/model.ckpt", step)


            #
            # image = image.astype('uint8')
            # im = Image.fromarray(image[0])
            # im.show()

            # image = teX.astype('uint8')
            # im = Image.fromarray(image[0])
            # im.show()

        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)

        # evaluation: train a linear SVM on the encoder features
        loss, x_train = sess.run([cost, Z], feed_dict={X: trX,
                                                       dropout_conv: 1.0,
                                                       dropout_fc: 1.0,
                                                       })
        x_train = x_train.reshape(len(x_train), -1)
        y_train = np.argmax(trY, 1)


        print(x_train.shape)
        # note: this initial fit is superseded by the averaging loop below
        clf = svm.LinearSVC(max_iter=500, random_state=2)
        clf.fit(x_train, y_train)

        # compute the SVM's prediction accuracy on the test-set features
        loss, x_test = sess.run([cost, Z], feed_dict={X: teX,
                                                       dropout_conv: 1.0,
                                                       dropout_fc: 1.0,
                                                       })
        x_test = x_test.reshape(len(x_test), -1)
        y_test = np.argmax(teY, 1)

        # average accuracy over several independent SVM fits
        # (LinearSVC results vary from run to run without a fixed random_state)
        accuracy = 0
        iteration = 50
        for i in range(iteration):
            print(i)
            clf = svm.LinearSVC(max_iter=200)
            clf.fit(x_train, y_train)
            acc = clf.score(x_test, y_test)
            print(acc)
            accuracy += acc

        accuracy /= iteration
        print(accuracy)
Example #8
    def run(self, epochs):
        self.train_unsuper()
        #self.train_super()
        with tf.Session() as sess:

            tf.global_variables_initializer().run()
            dataset = Imagenet.Cifar()
            trX, trY, teX, teY = dataset.getdata()

            filetime = datetime.now().strftime("%Y_%m_%d_%H_%M")
            path = "Networkfile/convAE_sep_class"
            #path = "Networkfile/convAENN_sep" + filetime
            saver = NNutils.save(path, sess)
            writer, merged = NNutils.graph(path, sess)

            test_indices = np.arange(len(teX))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:self.batch_size]

            st_time = datetime.now()
            for i in range(epochs):
                print(i, st_time)
                for start, end in zip(
                        range(0, len(trX), self.batch_size),
                        range(self.batch_size, len(trX), self.batch_size)):
                    _ = sess.run(self.trainop_unsuper,
                                 feed_dict={
                                     self.x: trX[start:end],
                                     self.dropout_conv: 0.8,
                                     self.dropout_fc: 0.6
                                 })

                    # graph_def = graph_pb2.GraphDef()
                    # output_names = ""
                    # tf.graph_util.convert_variables_to_constants(sess, graph_def, output_names)
                    #print(cost_ae)
                # note: start/end are left over from the final inner-loop
                # iteration, so only the last training batch is encoded here
                output = sess.run(self.z,
                                  feed_dict={
                                      self.x: trX[start:end],
                                      self.dropout_conv: 0.8,
                                      self.dropout_fc: 0.6
                                  })
                # caution: tf.constant() adds a new node to the graph on
                # every epoch (see the sketch after this example)
                z = tf.constant(output)
                print(z.shape)
                # cost_nn, _ = sess.run([self.cost_super, self.trainop_super],
                #                             feed_dict={self.x_nn: self.z, self.y: trY[start:end],
                #                                        self.dropout_conv: 0.8, self.dropout_fc : 0.6})
                #

                #_, loss_super, step = sess.run([trainop_super, cost_super, global_step], feed_dict={x:trX[0:0], y: trY[start:end]
                #                                                                  ,dropout_conv: 0.8, dropout_fc : 0.6})

                # if step % 50 == 0:
                #     writer.add_summary(summary, step)
                #     print(step, loss_super, loss_unsuper)

                #print(np.shape(trX))
                #summary, accuracy, loss = sess.run([merged, acc_op, cost], feed_dict={ X: teX[test_indices], Y: teY[test_indices]})
                #print(step, datetime.now(), loss_unsuper, loss_super, learning_rate)

                # loss_su, loss_un, accuracy, step = sess.run([cost_super, cost_unsuper, acc_op, global_step], feed_dict={X: teX[test_indices], Y: teY[test_indices],
                #                                                           dropout_conv : 1.0, dropout_fc : 1.0})
                # print("test results : ", accuracy, loss_super, loss_unsuper)
                # saver.save(sess, path + "/model.ckpt", step)

            end_time = datetime.now()
            print("걸린 시간 = ", end_time - st_time)