Example #1
def result(sess, teX, test_gray, path):
    loss, arr = sess.run([cost, X_],
                         feed_dict={X: teX[0:batch_size],
                                    x_gray: test_gray[0:batch_size],
                                    dropout_conv: 1.0, dropout_fc: 1.0})

    NNutils.create_image(arr, path)
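
# Hypothetical call site (cost, X_, X, x_gray, batch_size and the dropout
# placeholders are module-level graph tensors assumed to be built elsewhere
# in this file, as in the later examples):
# result(sess, teX, test_gray, "ConvAE/rgb2gray/recon")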
Example #2
    def model_fc(self,
                 z,
                 layer=[2 * 2 * 256, 2 * 2 * 128, 2 * 2 * 64, 2 * 2 * 16, 10]):
        output = z

        layer_num = 0

        with tf.name_scope('reshape'):
            reshape_size = 2 * 2 * 256
            output = tf.reshape(output, [-1, reshape_size])

        for i in range(3):
            layer_num += 1
            with tf.variable_scope('fc' + str(layer_num)):
                output = NNutils.create_layer('fc',
                                              output,
                                              layer[layer_num],
                                              var_list=self.train_list,
                                              dropout=self.dropout_normal)

        layer_num += 1
        with tf.variable_scope('fc' + str(layer_num)):
            output = NNutils.create_layer('fc',
                                          output,
                                          layer[layer_num],
                                          var_list=self.train_list,
                                          activation='none')
        y = output

        return y
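
# A minimal standalone sketch of the same fully connected stack in plain
# TF 1.x ops; tf.layers.dense stands in for this repo's NNutils.create_layer
# helper, and the shapes mirror the defaults above:
import tensorflow as tf

z = tf.placeholder(tf.float32, [None, 2, 2, 256])
output = tf.reshape(z, [-1, 2 * 2 * 256])
for i, units in enumerate([2 * 2 * 128, 2 * 2 * 64, 2 * 2 * 16], start=1):
    with tf.variable_scope('fc' + str(i)):
        output = tf.layers.dense(output, units, activation=tf.nn.relu)
with tf.variable_scope('fc4'):
    y = tf.layers.dense(output, 10, activation=None)  # logits, no activation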
Example #3
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            # MNIST data
            dataset = mnist.read_data_sets("MNIST_data/", one_hot=True)
            train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
                                                             dataset.test.images, dataset.test.labels
            train_data = train_data.reshape(
                -1, self.time_step,
                int(28 * 28 / self.time_step))  # TF's RNN expects higher-rank input (one row per time step)
            test_data = test_data.reshape(
                -1, self.time_step,
                int(28 * 28 / self.time_step))  # MNIST is not really a natural fit for this

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:10000]

            path = "LSTM_mnist/" + str(step_limit) + ""
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, train_data.shape[0], self.batch_size),
                        range(self.batch_size, train_data.shape[0],
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 0.5})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.cost, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/model.ckpt", step)
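
# The paired-range idiom used in the training loops above yields
# (start, end) slices of size batch_size and silently drops the final
# partial batch:
batch_size = 128
n = 300
print(list(zip(range(0, n, batch_size), range(batch_size, n, batch_size))))
# [(0, 128), (128, 256)] -- samples 256..299 are never used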
Example #4
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = mnist.read_data_sets("MNIST_data/", one_hot=True)
            train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
                                          dataset.test.images, dataset.test.labels

            # this dataset is normalized to values in [0, 1] by default, so the images cannot be viewed directly => rescale to 0~255
            train_data = train_data * 255
            test_data = test_data * 255

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:1000]

            path = "AE/" + str(step_limit)
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    _, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.training_fc,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 0.8,
                                               self.dropout_normal: 0.5})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, loss, loss_fc, im = sess.run(
                    [merged, self.cost, self.cost_fc, self.im],
                    feed_dict={
                        self.x: test_data,
                        self.y: test_label,
                        self.dropout_conv: 1.0,
                        self.dropout_normal: 1.0
                    })

                writer_test.add_summary(summary, step)
                print("test results : ", loss, loss_fc)
                saver.save(sess, path + "/model.ckpt", step)
Example #5
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = mnist.read_data_sets("MNIST_data/", one_hot=True)
            train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
                                          dataset.test.images, dataset.test.labels

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:1000]

            name = self.info()
            path = "mnist/" + str(step_limit) + name

            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.cost, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/" + name + ".ckpt", step)
Example #6
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            # load the CIFAR-10 data (32*32*3) and flatten it so fully connected layers can consume it (32*32*3 => 3072)
            dataset = cifar.Cifar()
            train_data, train_label, test_data, test_label = dataset.getdata()
            train_data = train_data.reshape(-1, 3072)
            test_data = test_data.reshape(-1, 3072)

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:1000]

            name = self.info()
            path = "cifar/" + str(step_limit) + name

            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.cost, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/" + name + ".ckpt", step)
Example #7
    def run(self, step_limit):
        self.train()

        rcv1 = fetch_rcv1(subset='train')
        train_data = rcv1.data
        train_label = rcv1.target

        rcv1 = fetch_rcv1(subset='test', random_state=1)
        test_data = rcv1.data
        test_label = rcv1.target

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            path = "LSTM/" + str(step_limit) + "rcv1"
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)

                for start, end in zip(
                        range(0, train_data.shape[0], self.batch_size),
                        range(self.batch_size, train_data.shape[0],
                              self.batch_size)):
                    data = scipy.sparse.coo_matrix(train_data[start:end])
                    label = scipy.sparse.coo_matrix(train_label[start:end])
                    indices = np.array([data.row, data.col]).T
                    # the labels have their own sparsity pattern, so they need their own indices
                    label_indices = np.array([label.row, label.col]).T

                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: (indices, data.data, data.shape),
                                               self.y: (label_indices, label.data, label.shape),
                                               self.dropout_conv: 1.0, self.dropout_normal: 1.0})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                        # the placeholders are sparse, so the test slice must be
                        # converted to (indices, values, shape) triples as well
                        data = scipy.sparse.coo_matrix(test_data[0:1000])
                        label = scipy.sparse.coo_matrix(test_label[0:1000])
                        indices = np.array([data.row, data.col]).T
                        label_indices = np.array([label.row, label.col]).T

                        summary, \
                        loss, \
                        accuracy = sess.run([merged, self.cost, self.accuracy],
                                            feed_dict={self.x: (indices, data.data, data.shape),
                                                       self.y: (label_indices, label.data, label.shape),
                                                       self.dropout_conv: 1.0, self.dropout_normal: 1.0})

                        writer_test.add_summary(summary, step)

                        print("test results : ", accuracy, loss)
Example #8
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = svhn.SVHN()
            train_data, train_label = dataset.get_trainset()
            test_data, test_label = dataset.get_testset()
            train_data = train_data.reshape(-1, 3072)
            test_data = test_data.reshape(-1, 3072)

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:1000]

            path = "mlp/" + str(step_limit) + "SvhnMlpRelu" + str(
                len(self.layers))
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.cost, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/model.ckpt", step)
Example #9
def run(epochs):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()

        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        #path = "Networkfile/" + "convAENN_noise"
        path = "Networkfile/convAENN" + filetime
        saver = NNutils.save(path, sess)
        writer, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:(batch_size)]

        st_time = datetime.now()

        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size),
                                  range(batch_size, len(trX), batch_size)):
                summary, _, loss_nn, loss_ae, learning_rate, step = sess.run(
                    [merged, trainop, cost_NN, cost_AE, lr, global_step],
                    feed_dict={
                        X: trX[start:end],
                        Y: trY[start:end],
                        dropout_conv: 0.5,
                        dropout_fc: 0.5
                    })
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), loss_nn, loss_ae,
                          learning_rate)

            loss_nn, loss_ae, accuracy = sess.run(
                [cost_NN, cost_AE, acc_op],
                feed_dict={
                    X: teX[test_indices],
                    Y: teY[test_indices],
                    dropout_conv: 1.0,
                    dropout_fc: 1.0
                })
            print("test results : ", accuracy, loss_nn, loss_ae)
            saver.save(sess, path + "/model.ckpt", step)

            # im = im.astype('uint8')
            # im = Image.fromarray(im[0])
            # im.save('convAENN.jpg')

        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)
Example #10
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = cifar.Cifar()
            train_data, train_label, test_data, test_label = dataset.getdata()

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:1000]

            path = "ConvAE_2dim/50000"
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, \
                    loss, \
                    step = sess.run([merged,
                                     self.training,
                                     self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 0.8,
                                               self.dropout_normal: 0.5})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, loss = sess.run(
                    [merged, self.cost],
                    feed_dict={
                        self.x: test_data[test_indices],
                        self.y: test_label[test_indices],
                        self.dropout_conv: 1.0,
                        self.dropout_normal: 1.0
                    })

                writer_test.add_summary(summary, step)
                print("test results : ", loss)
                saver.save(sess, path + "/model.ckpt", step)
Example #11
def select_dataset(name):
    x_size, y_size, train_data, train_label, test_data, test_label = 0, 0, [], [], [], []  # initialize
    if name == 'cifar':
        dataset = cifar.CIFAR()
        train_data, train_label, test_data, test_label = dataset.getdata()

        train_data = train_data.reshape(-1, 3072)
        test_data = test_data.reshape(-1, 3072)
        x_size = 3072
        y_size = 10

    elif name == 'svhn':
        dataset = svhn.SVHN()
        train_data, train_label = dataset.get_trainset()
        test_data, test_label = dataset.get_testset()

        train_data = train_data.reshape(-1, 3072)
        test_data = test_data.reshape(-1, 3072)
        x_size = 3072
        y_size = 10

    elif name == 'mnist':
        dataset = mnist.read_data_sets(flags.MNIST_DIR, one_hot=True)
        train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
                                                         dataset.test.images, dataset.test.labels
        x_size = 784
        y_size = 10

    elif name == 'news':
        trainset = fetch_20newsgroups(data_home=flags.NEWS_DIR, subset='train')
        testset = fetch_20newsgroups(data_home=flags.NEWS_DIR, subset='test')

        vectorizer = TfidfVectorizer(analyzer='word', max_features=3072)

        vectorizer.fit(trainset.data)
        train_data = vectorizer.transform(trainset.data)
        train_data = csr_matrix.todense(train_data)
        train_label = trainset.target
        train_label = NNutils.onehot(train_label, 20, list=True)
        # print(train_label.shape)

        test_data = vectorizer.transform(testset.data)
        test_data = csr_matrix.todense(test_data)
        test_label = testset.target
        test_label = NNutils.onehot(test_label, 20, list=True)

        x_size = 3072
        y_size = 20

    return Dataset(name, x_size, y_size, train_data, train_label, test_data,
                   test_label)
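
# The Dataset container returned above is not shown in this excerpt; a
# minimal stand-in with the same field order would be:
from collections import namedtuple

Dataset = namedtuple('Dataset', ['name', 'x_size', 'y_size',
                                 'train_data', 'train_label',
                                 'test_data', 'test_label'])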
Example #12
    def run(self, dataset, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            name = self.info()
            path = dataset.name + "/" + str(step_limit) + name

            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            step_saved = step
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(dataset.train_data), self.batch_size),
                        range(self.batch_size, len(dataset.train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.opt, self.loss,
                                     self.global_step],
                                    feed_dict={self.x: dataset.train_data[start:end],
                                               self.y: dataset.train_label[start:end],
                                               self.training: True})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.loss, self.accuracy],
                                    feed_dict={self.x: dataset.test_data,
                                               self.y: dataset.test_label,
                                               self.training: False})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                if step - step_saved > 1000:
                    saver.save(sess, path + "/" + name + ".ckpt", step)
                    step_saved = step
Example #13
    def model_fc(self, z, layer=[7 * 7, 6 * 6, 5 * 5, 10]):
        output = z

        layer_num = 0

        for i in range(2):
            layer_num += 1
            with tf.variable_scope('fc' + str(layer_num)):
                output = NNutils.create_layer('fc',
                                              output,
                                              layer[layer_num],
                                              var_list=self.train_list)
                # output = tf.nn.dropout(output, self.dropout_conv)

        layer_num += 1
        with tf.variable_scope('fc' + str(layer_num)):
            output = NNutils.create_layer('fc',
                                          output,
                                          layer[layer_num],
                                          var_list=self.train_list,
                                          activation='none')
        y = output

        return y
Example #14
    def run(self, epochs):
        self.train_unsuper()
        #self.train_super()
        with tf.Session() as sess:

            tf.global_variables_initializer().run()
            dataset = Imagenet.Cifar()
            trX, trY, teX, teY = dataset.getdata()

            filetime = datetime.now().strftime("%Y_%m_%d_%H_%M")
            path = "Networkfile/convAE_sep_class"
            #path = "Networkfile/convAENN_sep" + filetime
            saver = NNutils.save(path, sess)
            writer, merged = NNutils.graph(path, sess)

            test_indices = np.arange(len(teX))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:(self.batch_size)]

            st_time = datetime.now()
            for i in range(epochs):
                print(i, st_time)
                for start, end in zip(
                        range(0, len(trX), self.batch_size),
                        range(self.batch_size, len(trX), self.batch_size)):
                    _ = sess.run(self.trainop_unsuper,
                                 feed_dict={
                                     self.x: trX[start:end],
                                     self.dropout_conv: 0.8,
                                     self.dropout_fc: 0.6
                                 })

                    # graph_def = graph_pb2.GraphDef()
                    # output_names = ""
                    # tf.graph_util.convert_variables_to_constants(sess, graph_def, output_names)
                    #print(cost_ae)
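                # note: start/end here still hold the last training batch from the loop above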
                output = sess.run(self.z,
                                  feed_dict={
                                      self.x: trX[start:end],
                                      self.dropout_conv: 0.8,
                                      self.dropout_fc: 0.6
                                  })
                z = tf.constant(output)
                print(z.shape)
                # cost_nn, _ = sess.run([self.cost_super, self.trainop_super],
                #                             feed_dict={self.x_nn: self.z, self.y: trY[start:end],
                #                                        self.dropout_conv: 0.8, self.dropout_fc : 0.6})
                #

                #_, loss_super, step = sess.run([trainop_super, cost_super, global_step], feed_dict={x:trX[0:0], y: trY[start:end]
                #                                                                  ,dropout_conv: 0.8, dropout_fc : 0.6})

                # if step % 50 == 0:
                #     writer.add_summary(summary, step)
                #     print(step, loss_super, loss_unsuper)

                #print(np.shape(trX))
                #summary, accuracy, loss = sess.run([merged, acc_op, cost], feed_dict={ X: teX[test_indices], Y: teY[test_indices]})
                #print(step, datetime.now(), loss_unsuper, loss_super, learning_rate)

                # loss_su, loss_un, accuracy, step = sess.run([cost_super, cost_unsuper, acc_op, global_step], feed_dict={X: teX[test_indices], Y: teY[test_indices],
                #                                                           dropout_conv : 1.0, dropout_fc : 1.0})
                # print("test results : ", accuracy, loss_super, loss_unsuper)
                # saver.save(sess, path + "/model.ckpt", step)

            end_time = datetime.now()
            print("걸린 시간 = ", end_time - st_time)
Example #15
def run(epochs):
    with tf.Session() as sess:

        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()
        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M_%S")
        #path = "Networkfile/convAE_sep" + "2017_03_27_20_32"
        path = "Networkfile/convAENN_sep" + filetime
        saver = NNutils.save(path, sess)
        writer, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:(batch_size)]

        #run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        #run_metadata = tf.RunMetadata()

        st_time = datetime.now()
        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size),
                                  range(batch_size, len(trX), batch_size)):
                summary, _, _, cost_ae, cost_nn, step = sess.run(
                    [
                        merged, trainop_unsuper, trainop_super, cost_unsuper,
                        cost_super, global_step
                    ],
                    feed_dict={
                        x: trX[start:end],
                        y: trY[start:end],
                        dropout_conv: 1.0,
                        dropout_fc: 0.8
                    })
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), cost_nn, cost_ae)

            accuracy, cost_nn, cost_ae = sess.run(
                [acc_op, cost_super, cost_unsuper],
                feed_dict={
                    x: teX[test_indices],
                    y: teY[test_indices],
                    dropout_conv: 1.0,
                    dropout_fc: 1.0
                })

            saver.save(sess, path + "/model.ckpt", step)
            print("test results : ", accuracy, cost_nn, cost_ae)

            # _, loss_super, step = sess.run([trainop_super, cost_super, global_step], feed_dict={y: trY[start:end]
            #                                                                  ,dropout_conv: 0.8, dropout_fc : 0.6})

        #tf.graph_util.convert_variables_to_constants(sess, )
        # if step % 50 == 0:
        #     writer.add_summary(summary, step)
        #     print(step, loss_super, loss_unsuper)

        #print(np.shape(trX))
        #summary, accuracy, loss = sess.run([merged, acc_op, cost], feed_dict={ X: teX[test_indices], Y: teY[test_indices]})
        #print(step, datetime.now(), loss_unsuper, loss_super, learning_rate)

        # loss_su, loss_un, accuracy, step = sess.run([cost_super, cost_unsuper, acc_op, global_step], feed_dict={X: teX[test_indices], Y: teY[test_indices],
        #                                                           dropout_conv : 1.0, dropout_fc : 1.0})
        # print("test results : ", accuracy, loss_super, loss_unsuper)
        # saver.save(sess, path + "/model.ckpt", step)

        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)
Example #16
    def model(self, x, layer):
        image_width = 32

        output = x

        #first image
        layer_num = 0
        output1 = output

        for _ in range(4):
            with tf.variable_scope('1conv' + str(layer_num)):
                output1 = NNutils.create_layer('conv', output1, layer[layer_num])
                output1 = tf.nn.max_pool(output1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
                layer_num += 1

        with tf.name_scope('reshape'):
            # four stride-2 max pools reduce the 32x32 input to 2x2
            pooled_width = image_width // 16
            output1 = tf.reshape(output1, [-1, pooled_width * pooled_width * layer[3]])

        #second image
        layer_num = 0
        output2 = output

        for _ in range(4):
            with tf.variable_scope('2conv' + str(layer_num)):
                output2 = NNutils.create_layer('conv', output2, layer[layer_num])
                output2 = tf.nn.max_pool(output2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
                layer_num += 1

        with tf.name_scope('reshape'):
            output2 = tf.reshape(output2, [-1, pooled_width * pooled_width * layer[3]])

        #summation layer
        with tf.name_scope('concat'):
            # join the branches along the feature axis (axis 0 would stack them
            # along the batch dimension and break the batch/label pairing)
            output = tf.concat([output1, output2], 1)

        with tf.variable_scope('normal' + str(layer_num)):
            output = NNutils.create_layer('normal', output, layer[layer_num])
            layer_num += 1

        with tf.variable_scope('normal' + str(layer_num)):
            output = NNutils.create_layer('normal', output, layer[layer_num], activation='none')
            layer_num += 1

        y = output

        return y
Example #17
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            dataset = input_data.read_data_sets("MNIST_data/", one_hot=True)
            train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
                                                             dataset.test.images, dataset.test.labels

            test_indices = np.arange(len(test_data))
            np.random.shuffle(test_indices)
            test_indices = test_indices[0:1000]

            path = "mlp/" + str(step_limit) + "expand"
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 0.5})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                    if step == 1000:
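                        # unfinished weight-expansion experiment: exp_w is never
                        # used, and re-fetching 'fc1/fc:0' below discards the
                        # expand_dims result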
                        graph = tf.get_default_graph()
                        old_w = graph.get_tensor_by_name('fc1/fc:0')
                        exp_w = tf.constant(0,
                                            dtype=tf.float32,
                                            shape=[784, 100],
                                            name='new_w')

                        old_w = tf.expand_dims(old_w, 1)
                        graph = tf.get_default_graph()
                        old_w = graph.get_tensor_by_name('fc1/fc:0')
                        print(old_w)

                        # w = tf.assign(old_w, )


                summary, \
                loss, \
                accuracy = sess.run([merged, self.cost, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
Example #18
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            # dataset = fetch_20newsgroups(data_home="C:/Projects/Data/20newsgroups", subset='all')
            trainset = fetch_20newsgroups(data_home=flags.NEWS_DIR,
                                          subset='train')
            testset = fetch_20newsgroups(data_home=flags.NEWS_DIR,
                                         subset='test')

            vectorizer = TfidfVectorizer(analyzer='word', max_features=3072)

            vectorizer.fit(trainset.data)
            train_data = vectorizer.transform(trainset.data)
            train_data = csr_matrix.todense(train_data)
            train_label = trainset.target
            train_label = NNutils.onehot(20, train_label)  # not updated since the NNutils API changed
            # print(train_label.shape)

            test_data = vectorizer.transform(testset.data)
            test_data = csr_matrix.todense(test_data)
            test_label = testset.target
            test_label = NNutils.onehot(20, test_label)
            # print(test_label.shape)

            # dataset = mnist.read_data_sets("C:/Projects/Data/MNIST_data", one_hot=True)
            # train_data, train_label, test_data, test_label = dataset.train.images, dataset.train.labels, \
            #                               dataset.test.images, dataset.test.labels

            name = self.info()
            path = "news/" + str(step_limit) + name
            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(
                        range(0, len(train_data), self.batch_size),
                        range(self.batch_size, len(train_data),
                              self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.loss,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.droprate_ph: self.droprate_train})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.loss, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.droprate_ph: self.droprate_test})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/" + name + ".ckpt", step)
Example #19
    def run(self, step_limit):
        self.train()

        with tf.Session() as sess:
            tf.global_variables_initializer().run()

            # dataset = fetch_20newsgroups(data_home="C:/Projects/Data/20newsgroups", subset='all')
            trainset = fetch_20newsgroups(data_home="C:/Projects/Data/20newsgroups", subset='train')
            testset = fetch_20newsgroups(data_home="C:/Projects/Data/20newsgroups", subset='test')

            vectorizer = TfidfVectorizer(analyzer='word', max_features=3072)

            vectorizer.fit(trainset.data)
            train_data = vectorizer.transform(trainset.data)
            train_data = csr_matrix.todense(train_data)
            train_label = trainset.target
            train_label = NNutils.onehot(20, train_label)
            # print(train_label.shape)

            test_data = vectorizer.transform(testset.data)
            test_data = csr_matrix.todense(test_data)
            test_label = testset.target
            test_label = NNutils.onehot(20, test_label)

            # test_indices = np.arange(len(test_data))
            # np.random.shuffle(test_indices)
            # test_indices = test_indices[0:1000]

            name = self.info()
            path = "news/" + str(step_limit) + name

            saver = NNutils.save(path, sess)
            writer, writer_test, merged = NNutils.graph(path, sess)

            step = sess.run(self.global_step)
            while step < step_limit:
                print("step :", step)
                for start, end in zip(range(0, len(train_data), self.batch_size),
                                      range(self.batch_size, len(train_data), self.batch_size)):
                    summary, \
                    _, loss, \
                    step = sess.run([merged,
                                     self.training, self.cost,
                                     self.global_step],
                                    feed_dict={self.x: train_data[start:end],
                                               self.y: train_label[start:end],
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                    if step % 50 == 0:
                        writer.add_summary(summary, step)
                        print(step, datetime.now(), loss)

                summary, \
                loss, \
                accuracy = sess.run([merged, self.cost, self.accuracy],
                                    feed_dict={self.x: test_data,
                                               self.y: test_label,
                                               self.dropout_conv: 1.0,
                                               self.dropout_normal: 1.0})

                writer_test.add_summary(summary, step)
                print("test results : ", accuracy, loss)
                saver.save(sess, path + "/" + name + ".ckpt", step)
Example #20
def run(epochs):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()


        print(teY.shape)

        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M")
        path = "convAE/" + "rgb2rgb"
        #path = "Networkfile/convAE" + filetime
        saver = NNutils.save(path, sess)
        writer, writer_test, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:batch_size]

        st_time = datetime.now()

        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)):
                summary, _,\
                loss, learning_rate,\
                step = sess.run([merged, trainop,
                                 cost, lr,
                                 global_step],
                                feed_dict={ X: trX[start:end],
                                            dropout_conv : 0.8, dropout_fc : 0.5})
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), loss, learning_rate)


            loss, results = sess.run([cost, Z], feed_dict={X: teX,
                                                           dropout_conv: 1.0,
                                                           dropout_fc: 1.0,
                                                           })
            print("test results : ", loss)
            saver.save(sess, path + "/model.ckpt", step)


            #
            # image = image.astype('uint8')
            # im = Image.fromarray(image[0])
            # im.show()

            # image = teX.astype('uint8')
            # im = Image.fromarray(image[0])
            # im.show()

        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)

        # Evaluation
        # Train an SVM on the learned features
        loss, x_train = sess.run([cost, Z], feed_dict={X: trX,
                                                       dropout_conv: 1.0,
                                                       dropout_fc: 1.0,
                                                       })
        x_train = x_train.reshape(len(x_train), -1)
        y_train = np.argmax(trY, 1)


        print(x_train.shape)
        clf = svm.LinearSVC(max_iter=500, random_state=2)
        clf.fit(x_train, y_train)

        # Compute SVM prediction accuracy
        loss, x_test = sess.run([cost, Z], feed_dict={X: teX,
                                                       dropout_conv: 1.0,
                                                       dropout_fc: 1.0,
                                                       })
        x_test = x_test.reshape(len(x_test), -1)
        y_test = np.argmax(teY, 1)

        accuracy = 0
        iteration = 50
        for i in range(iteration):
            print(i)
            clf = svm.LinearSVC(max_iter=200)
            clf.fit(x_train, y_train)
            acc = clf.score(x_test, y_test)
            print(acc)
            accuracy += acc

        accuracy /= iteration
        print(accuracy)
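
# Note: LinearSVC without a fixed random_state can converge to slightly
# different solutions on each fit, which is presumably why the loop above
# refits the classifier 50 times and averages the accuracy.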
Example #21
def run(epochs):
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        dataset = Imagenet.Cifar()
        trX, trY, teX, teY = dataset.getdata()
        train_gray, test_gray = dataset.getgray()
        #train_gray = train_gray.reshape(-1, 32, 32, 1)
        #test_gray = test_gray.reshape(-1, 32, 32, 1)

        filetime = datetime.now().strftime("%Y_%m_%d_%H_%M")
        path = "ConvAE/" + "rgb2gray"
        #path = "Networkfile/convAE" + filetime
        saver = NNutils.save(path, sess)
        writer, writer_test, merged = NNutils.graph(path, sess)

        test_indices = np.arange(len(teX))
        np.random.shuffle(test_indices)
        test_indices = test_indices[0:(batch_size)]  # because of deconv, anything larger than batch_size causes problems

        st_time = datetime.now()

        for i in range(epochs):
            print(i, st_time)
            for start, end in zip(range(0, len(trX), batch_size), range(batch_size, len(trX), batch_size)):
                summary,\
                _, loss, \
                learning_rate, step = sess.run([merged,
                                                trainop, cost,
                                                # trainop_nn, cost_nn,
                                                lr, global_step],
                                               feed_dict={ X: trX[start:end],
                                                           # Y: trY[start:end],
                                                           x_gray: train_gray[start:end],
                                                           dropout_conv : 0.8, dropout_fc : 1.0})

                #print(loss)
                if step % 50 == 0:
                    writer.add_summary(summary, step)
                    print(step, datetime.now(), loss, learning_rate)


            loss, arr = sess.run([cost, X_],
                            feed_dict={X: teX,
                                       # Y: teY[0:batch_size],
                                       x_gray: test_gray,
                                       dropout_conv : 1.0, dropout_fc : 1.0})

            # arr_uint = arr.astype('uint8')
            # #arr_uint = arr_uint.reshape(-1, 32 * 32 * 1)
            # im = Image.fromarray(arr_uint[0])
            # im.show()

            print("test results : ", loss)
            saver.save(sess, path + "/model.ckpt", step)


        end_time = datetime.now()
        print("걸린 시간 = ", end_time - st_time)

        # Evaluation
        # Train an SVM on the learned features
        x_train = sess.run(Z, feed_dict={X: trX,
                                         x_gray: train_gray,  # was test_gray, whose length does not match trX
                                         dropout_conv: 1.0,
                                         dropout_fc: 1.0,
                                         })
        x_train = x_train.reshape(len(x_train), -1)
        y_train = np.argmax(trY, 1)

        print(x_train.shape)

        # Compute SVM prediction accuracy
        x_test = sess.run(Z, feed_dict={X: teX,
                                        x_gray: test_gray,
                                        dropout_conv: 1.0,
                                        dropout_fc: 1.0,
                                        })
        x_test = x_test.reshape(len(x_test), -1)
        y_test = np.argmax(teY, 1)

        accuracy = 0
        iteration = 50
        for i in range(iteration):
            print(i)
            clf = svm.LinearSVC(max_iter=200)
            clf.fit(x_train, y_train)
            acc = clf.score(x_test, y_test)
            print(acc)
            accuracy += acc

        accuracy /= iteration
        print(accuracy)
Example #22
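                        # (excerpt: the enclosing episode and step loops are not shown)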
                        buffer[i][2] = 1  # award the reward

                if episode % 100 == 0:
                    env.render()

                reward_all += reward
            #

            for key, action, value in buffer:
                mem.add(key, action, value)

            # print(mem.buffer)
            x = np.array(list(mem.buffer.keys()))
            y = np.array(list(mem.buffer.values()))
            y = y[:, :, 2]
            x = NNutils.onehot(x, env.observation_space.n, list=True)

            # print("input & label",x.shape, y.shape)
            # print("x, y :", x, y)
            _, loss = net.update(sess, x, y)
            # print("loss", loss)

            graph.append(reward_all)

        x = np.array(list(mem.buffer.keys()))
        y = np.array(list(mem.buffer.values()))
        y = y[:, :, 2]
        print(x, y)
        print("Success rate: " + str(sum(graph) / num_episodes))
        plt.bar(range(len(graph)), graph, color="blue")
        plt.show()
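
# A minimal illustration of the y[:, :, 2] slice above, assuming each buffered
# value is a list of (state, action, reward) rows (the data here is made up):
import numpy as np

buffer = {0: [[0, 1, 0.0], [0, 2, 1.0]],
          3: [[3, 0, 0.5], [3, 1, 0.0]]}
y = np.array(list(buffer.values()))  # shape (2, 2, 3)
print(y[:, :, 2])                    # keeps only the reward column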
Example #23
import scipy.sparse
from sklearn.datasets import fetch_20newsgroups
# from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.feature_extraction.text import TfidfVectorizer
# from datetime import datetime

import NNutils  # used for the onehot call below


# load the 20newsgroups set
dataset = fetch_20newsgroups(data_home="C:/YWK/Projects/Python/Data/20newsgroups", subset='all')    # instead of using this, try fitting on the train split
trainset = fetch_20newsgroups(data_home="C:/YWK/Projects/Python/Data/20newsgroups", subset='train')
testset = fetch_20newsgroups(data_home="C:/YWK/Projects/Python/Data/20newsgroups", subset='test')

# vectorize
vectorizer = TfidfVectorizer(analyzer='word', max_features=1000)


vectorizer.fit(dataset.data)
train_data = vectorizer.transform(trainset.data)
train_data = scipy.sparse.csr_matrix.todense(train_data)
train_label = trainset.target
train_label = NNutils.onehot(20, train_label)
print(train_data.shape)
print(train_data[0])

# test_data = vectorizer.fit_transform(testset.data)
# test_data = scipy.sparse.csr_matrix.todense(test_data)
# test_label = testset.target
# print(test_data[0])
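
# Note: if re-enabled, the disabled block above should not call fit_transform,
# which would refit the vectorizer on the test split; reusing the vectorizer
# fitted above is the usual pattern:
# test_data = vectorizer.transform(testset.data)
# test_data = scipy.sparse.csr_matrix.todense(test_data)
# test_label = NNutils.onehot(20, testset.target)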


Example #24
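# (excerpt: X, Y, cost, train_op and global_step are assumed to be defined
#  earlier in the file)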
# trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels
dataset = Imagenet.Cifar()
trX, trY, teX, teY = dataset.getdata()
trX = trX.reshape(-1, 32 * 32 * 3)
teX = teX.reshape(-1, 32 * 32 * 3)

print(tf.shape(cost))
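# (the line above prints the shape op itself, not its value; use
#  sess.run(tf.shape(cost)) inside the session below to see the value)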

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    batch_size = 128
    st_time = datetime.now()
    print(st_time)

    savepath = "Networkfile/NN"
    saver = NNutils.save(savepath, sess)
    writer, merge = NNutils.graph("Networkfile/NN", sess)

    for i in range(1):
        for start, end in zip(range(0, len(trX), batch_size),
                              range(batch_size, len(trX), batch_size)):
            summary, _, loss, step = sess.run(
                [merge, train_op, cost, global_step],
                feed_dict={
                    X: trX[start:end],
                    Y: trY[start:end]
                })
            print(step, loss)
            writer.add_summary(summary, step)
        #saver.save(sess, savepath + "/model.ckpt", step)