Example #1
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._learning_rate = tf.placeholder(tf.float32,
                                             shape=[],
                                             name="learn_rate")

        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=self._learning_rate,
            beta1=0.9,
            beta2=0.999,
            epsilon=1e-08,
            use_locking=False,
            name='Adam')
        self._writer = tf.summary.FileWriter('./summary')

        # Call the graph-building accessors once and reuse the results.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        #self.output = tf.concat([self.out_real, self.out_image], axis=1)

        transmit = self._model.get_Transmit()
        self.Trans_real = transmit[0]
        self.Trans_image = transmit[1]

        self.loss_real = tf.math.subtract(self.Trans_real, self.out_real)
        self.loss_image = tf.math.subtract(self.Trans_image, self.out_image)
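A note on the recurring real/"image" split in these examples: the model carries the real and imaginary parts of a complex signal as two separate tensors ("image" is the author's shorthand for "imaginary"), so loss_real and loss_image together form the componentwise error of the complex reconstruction. A minimal NumPy sketch of the identity the later loss functions rely on (illustrative data, not part of the original code):

import numpy as np

y = np.array([1 + 2j, 3 - 1j])              # "transmitted" samples
y_hat = np.array([0.9 + 2.1j, 2.8 - 1.2j])  # "reconstructed" samples

loss_real = y.real - y_hat.real   # analogous to self.loss_real
loss_image = y.imag - y_hat.imag  # analogous to self.loss_image
# Squared real and imaginary residuals sum to the squared magnitude of
# the complex error, which is what the later loss functions compute.
assert np.allclose(loss_real**2 + loss_image**2, np.abs(y - y_hat)**2)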
Example #2
    def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params,
                 channel_params, decoder_params, decision_params, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model(dataset_params=dataset_params, encoder_params=encoder_params,
                            channel_params=channel_params, decoder_params=decoder_params,
                            decision_params=decision_params)
        # Tried GradientDescentOptimizer, but it seemed hard to converge; the paper
        # mentions SGD with the Adam optimizer, so I think the scheme is mini-batch
        # SGD with Adam as the update rule.
        self._optimizer = tf.train.AdamOptimizer(learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')
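On the optimizer comment above: mini-batch SGD describes how gradients are sampled, while Adam is the update rule applied to each sampled gradient, so the two are complementary rather than competing choices. A self-contained TF1-style toy (all names here are illustrative, not from the original project):

import tensorflow as tf

x = tf.Variable(3.0)
loss = tf.square(x - 1.0)
# Swapping the optimizer changes only the update rule; the mini-batch
# sampling loop (here a single toy step per iteration) stays the same.
train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)
# train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(200):
        sess.run(train_op)
    print(sess.run(x))  # approaches 1.0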
Example #3
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')
Example #4
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

    def train_model(self):
        data = self._dataset.load_data()

        # Loss function -- for now just a simple placeholder: the mean of
        # the real part of the reconstruction.
        loss = tf.reduce_mean(self._model.get_reconstruction()[0])

        tf.summary.scalar('Loss', loss)
        training_op = self._optimizer.minimize(loss)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            print("Training Start")
            for epoch in range(0, self._n_epochs):
                train_loss = 0
                for X_batch_real, y_batch_real, X_batch_image, y_batch_image in self._dataset.shuffle_batch(
                        data['X_train_real'], data['y_train_real'],
                        data['X_train_image'], data['y_train_image'],
                        self._batch_size):
                    _, loss_batch = sess.run(
                        [training_op, loss],
                        feed_dict={self._model.X_real: X_batch_real,
                                   self._model.X_image: X_batch_image})
                    train_loss += loss_batch
                    #print("alph is: ", self._model.get_para().eval())

                summary = sess.run(summary_op, feed_dict={self._model.X_real: data['X_valid_real'], self._model.X_image: data['X_valid_image']})
                self._writer.add_summary(summary=summary, global_step=epoch)
                print(epoch, "Training Loss:", train_loss, "Validation Loss",
                      loss.eval(feed_dict={self._model.X_real: data['X_valid_real'],
                                           self._model.X_image: data['X_valid_image']}))
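DatasetMNIST is not shown in any of these examples, so the exact behavior of shuffle_batch is an assumption. A plausible sketch of such a generator, yielding shuffled mini-batches once per epoch:

import numpy as np

def shuffle_batch(X, y, batch_size):
    # Hypothetical stand-in for DatasetMNIST.shuffle_batch: shuffle the
    # sample indices once, then yield consecutive mini-batches.
    rnd_idx = np.random.permutation(len(X))
    n_batches = len(X) // batch_size
    for batch_idx in np.array_split(rnd_idx, n_batches):
        yield X[batch_idx], y[batch_idx]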
Example #5
    def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params,
                 decoder_params, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model(dataset_params=dataset_params,
                            encoder_params=encoder_params,
                            decoder_params=decoder_params)
        self._optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')
Example #6
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

        # Call the graph-building accessor once and reuse the results.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)
Example #7
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._learning_rate = tf.placeholder(tf.float32,
                                             shape=[],
                                             name="learn_rate")

        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=self._learning_rate)
        #AdaBoundOptimizer(learning_rate=0.01, final_lr=0.01, beta1=0.9, beta2=0.999, amsbound=False)
        self._writer = tf.summary.FileWriter('./summary')

        # Call the graph-building accessor once and reuse the results.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)
        print("self._model.y_real", self._model.y_real.shape)
        print("self.out_real", self.out_real.shape)
Example #8
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params,
                 decoder_params, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model(dataset_params=dataset_params,
                            encoder_params=encoder_params,
                            decoder_params=decoder_params)
        self._optimizer = tf.train.GradientDescentOptimizer(
            learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

    def train_model(self):
        data = self._dataset.load_data()
        loss = tf.reduce_mean(
            tf.square(self._model.X - self._model.get_reconstruction()))
        tf.summary.scalar('Loss', loss)
        # Optimizer objects have no .grad() method; minimize() both computes
        # and applies the gradients.
        training_op = self._optimizer.minimize(loss)

        summary_op = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            for epoch in range(self._n_epochs):
                train_loss = 0
                for X_batch, y_batch in self._dataset.shuffle_batch(
                        data['X_train'], data['y_train'], self._batch_size):
                    _, loss_batch = sess.run(
                        [training_op, loss],
                        feed_dict={self._model.X: X_batch})
                    train_loss += loss_batch

                summary = sess.run(summary_op,
                                   feed_dict={self._model.X: data['X_valid']})
                self._writer.add_summary(summary=summary, global_step=epoch)
                # TODO(ali): Clean this code for the LOVE OF GOD!!!
                # 250 presumably equals the number of mini-batches per epoch.
                print(epoch, "Training Loss:", train_loss / 250,
                      "Validation Loss",
                      loss.eval(feed_dict={self._model.X: data['X_valid']}))
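A small efficiency note on this pattern (it recurs in the later examples): loss.eval(...) inside the print reruns the whole validation forward pass even though the merged summary was just computed from the same feed. Fetching both tensors in a single sess.run evaluates the graph once. A toy sketch:

import tensorflow as tf

x = tf.Variable(2.0)
loss = tf.square(x)
tf.summary.scalar('Loss', loss)
summary_op = tf.summary.merge_all()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # One call fetches both the summary and the loss value.
    summary, loss_val = sess.run([summary_op, loss])
    print(loss_val)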
Example #9
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

        # Call the graph-building accessor once and reuse the results.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)

    def train_model(self):
        data = self._dataset.load_data()
        # loss function
        loss = tf.reduce_sum(
            tf.math.add(tf.math.square(self.loss_real),
                        tf.math.square(self.loss_image)))

        # Q function
        x_mag = tf.reduce_sum(
            tf.math.add(tf.math.square(self._model.y_real),
                        tf.math.square(self._model.y_image)))
        Q = tf.math.divide(x_mag, loss)

        tf.summary.scalar('Loss', loss)
        training_op = self._optimizer.minimize(loss)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            print("Training Start")

            # variables_names = [v.name for v in tf.trainable_variables()]
            # values = sess.run(variables_names)
            # for k, v in zip(variables_names, values):
            #     print("Variable: ", k)
            #     print("Shape: ", v.shape)
            #     print(v)

            # print("predict:", self._model.get_reconstruction()[0].eval(
            #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))

            for epoch in range(0, self._n_epochs):
                train_loss = 0
                for X_batch_real, y_batch_real, X_batch_image, y_batch_image in self._dataset.shuffle_batch(
                        data['X_train_real'], data['y_train_real'],
                        data['X_train_image'], data['y_train_image'],
                        self._batch_size):
                    _, loss_batch = sess.run(
                        [training_op, loss],
                        feed_dict={
                            self._model.X_real: X_batch_real,
                            self._model.X_image: X_batch_image,
                            self._model.y_real: y_batch_real,
                            self._model.y_image: y_batch_image
                        })
                    train_loss += loss_batch

                summary = sess.run(summary_op,
                                   feed_dict={
                                       self._model.X_real:
                                       data['X_valid_real_1'],
                                       self._model.X_image:
                                       data['X_valid_image_1'],
                                       self._model.y_real:
                                       data['y_valid_real_1'],
                                       self._model.y_image:
                                       data['y_valid_image_1']
                                   })
                self._writer.add_summary(summary=summary, global_step=epoch)
                print(
                    epoch, "Training Loss:", train_loss, "Validation Loss",
                    loss.eval(
                        feed_dict={
                            self._model.X_real: data['X_valid_real_1'],
                            self._model.X_image: data['X_valid_image_1'],
                            self._model.y_real: data['y_valid_real_1'],
                            self._model.y_image: data['y_valid_image_1']
                        }), "Q factor:",
                    Q.eval(
                        feed_dict={
                            self._model.X_real: data['X_valid_real_1'],
                            self._model.X_image: data['X_valid_image_1'],
                            self._model.y_real: data['y_valid_real_1'],
                            self._model.y_image: data['y_valid_image_1']
                        }))

                # "predict:", self._model.get_middel_para()[0].eval(
                #     feed_dict={self._model.X_real: data['X_valid_real'], self._model.X_image: data['X_valid_image'],
                #                self._model.y_real: data['y_valid_real'], self._model.y_image: data['y_valid_image']})

            print("Testing")
            print(
                "Power = 1", "Q factor:",
                Q.eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_1'],
                        self._model.X_image: data['X_test_image_1'],
                        self._model.y_real: data['y_test_real_1'],
                        self._model.y_image: data['y_test_image_1']
                    }), "predict:",
                self._model.get_middel_para()[0].eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_1'],
                        self._model.X_image: data['X_test_image_1'],
                        self._model.y_real: data['y_test_real_1'],
                        self._model.y_image: data['y_test_image_1']
                    }))
            print(
                "Power = 2", "Q factor:",
                Q.eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_2'],
                        self._model.X_image: data['X_test_image_2'],
                        self._model.y_real: data['y_test_real_2'],
                        self._model.y_image: data['y_test_image_2']
                    }), "predict:",
                self._model.get_middel_para()[0].eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_2'],
                        self._model.X_image: data['X_test_image_2'],
                        self._model.y_real: data['y_test_real_2'],
                        self._model.y_image: data['y_test_image_2']
                    }))
            print(
                "Power = 3", "Q factor:",
                Q.eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_3'],
                        self._model.X_image: data['X_test_image_3'],
                        self._model.y_real: data['y_test_real_3'],
                        self._model.y_image: data['y_test_image_3']
                    }), "predict:",
                self._model.get_middel_para()[0].eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_3'],
                        self._model.X_image: data['X_test_image_3'],
                        self._model.y_real: data['y_test_real_3'],
                        self._model.y_image: data['y_test_image_3']
                    }))
            print(
                "Power = 4", "Q factor:",
                Q.eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_4'],
                        self._model.X_image: data['X_test_image_4'],
                        self._model.y_real: data['y_test_real_4'],
                        self._model.y_image: data['y_test_image_4']
                    }), "predict:",
                self._model.get_middel_para()[0].eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_4'],
                        self._model.X_image: data['X_test_image_4'],
                        self._model.y_real: data['y_test_real_4'],
                        self._model.y_image: data['y_test_image_4']
                    }))
            print(
                "Power = 5", "Q factor:",
                Q.eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_5'],
                        self._model.X_image: data['X_test_image_5'],
                        self._model.y_real: data['y_test_real_5'],
                        self._model.y_image: data['y_test_image_5']
                    }), "predict:",
                self._model.get_middel_para()[0].eval(
                    feed_dict={
                        self._model.X_real: data['X_test_real_5'],
                        self._model.X_image: data['X_test_image_5'],
                        self._model.y_real: data['y_test_real_5'],
                        self._model.y_image: data['y_test_image_5']
                    }))
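The Q factor above is the ratio of transmitted-signal power to reconstruction-error power, i.e. a linear SNR-style figure of merit. A NumPy sketch of the same computation on illustrative data, with the conventional decibel conversion added:

import numpy as np

y_real = np.array([1.0, 3.0, -2.0])
y_image = np.array([2.0, -1.0, 0.5])
out_real = y_real + 0.1 * np.random.randn(3)    # noisy reconstruction
out_image = y_image + 0.1 * np.random.randn(3)

x_mag = np.sum(y_real**2 + y_image**2)                              # signal power
err = np.sum((y_real - out_real)**2 + (y_image - out_image)**2)     # error power
Q = x_mag / err
print("Q (linear):", Q, "Q (dB):", 10 * np.log10(Q))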
Example #10
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
Example #11
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        # self._optimizer = tf.train.AdamOptimizer(learning_rate=optimizer_params['lr'])
        # self._writer = tf.summary.FileWriter('./summary')

        # self.out_real = self._model.get_reconstruction()[0]
        # self.out_image = self._model.get_reconstruction()[1]
        # self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        # self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)

    def train_model(self):
        data = self._dataset.load_data()
        # # loss function
        # loss = tf.reduce_sum(tf.math.add(tf.math.square(self.loss_real), tf.math.square(self.loss_image)))
        #
        # # Q function
        # x_mag = tf.reduce_sum(tf.math.add(tf.math.square(self._model.y_real), tf.math.square(self._model.y_image)))
        # Q = tf.math.divide(x_mag, loss)
        #
        # tf.summary.scalar('Loss', loss)
        # training_op = self._optimizer.minimize(loss)
        # saver = tf.train.Saver()
        # summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            print("Training Start")
            writer = tf.summary.FileWriter('./graphs', sess.graph)
            # variables_names = [v.name for v in tf.trainable_variables()]
            # values = sess.run(variables_names)
            # for k, v in zip(variables_names, values):
            #     print("Variable: ", k)
            #     print("Shape: ", v.shape)
            #     print(v)

            # tvars = tf.trainable_variables()
            # tvars_vals = sess.run(tvars)
            #
            # for var, val in zip(tvars, tvars_vals):
            #     print(var.name, val)

            # print("predict:", self._model.get_middle_para()[0].eval(
            #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))
            real = self._model.get_middle_para()[0].eval(
                feed_dict={
                    self._model.X_real: data['X_test_real_1'],
                    self._model.X_image: data['X_test_image_1'],
                    self._model.y_real: data['y_test_real_1'],
                    self._model.y_image: data['y_test_image_1']
                })

            # print("predict:", self._model.get_middle_para()[1].eval(
            #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))
            image = self._model.get_middle_para()[1].eval(
                feed_dict={
                    self._model.X_real: data['X_test_real_1'],
                    self._model.X_image: data['X_test_image_1'],
                    self._model.y_real: data['y_test_real_1'],
                    self._model.y_image: data['y_test_image_1']
                })

            np.savetxt('data_real.csv', real, delimiter=',')
            np.savetxt('data_image.csv', image, delimiter=',')
Example #12
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params,
                 channel_params, decoder_params, decision_params,
                 optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model(dataset_params=dataset_params,
                            encoder_params=encoder_params,
                            channel_params=channel_params,
                            decoder_params=decoder_params,
                            decision_params=decision_params)
        # Tried GradientDescentOptimizer, but it seemed hard to converge; the
        # paper mentions SGD with the Adam optimizer, so I think the scheme is
        # mini-batch SGD with Adam as the update rule.
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

    def train_model(self):
        data = self._dataset.load_data()
        # loss function
        #print("self._model.get_reconstruction()", self._model.get_reconstruction().shape)
        self._decoding_mess = tf.slice(self._model.X, [0, 6, 0], [-1, 1, -1])
        #print("self._decoding_mess shape is: ", self._decoding_mess.shape)
        #print("self._model.get_reconstruction shape is: ", self._model.get_reconstruction().shape)
        loss = tf.reduce_mean(-1.0 * tf.reduce_sum(
            self._decoding_mess *
            tf.log(self._model.get_reconstruction() + 1e-8), 2))
        #loss = tf.reduce_mean(tf.keras.backend.categorical_crossentropy(self._model.X, self._model.get_reconstruction()))

        X_batch_input = tf.math.argmax(self._decoding_mess, 2)
        correct_predict = tf.equal(
            X_batch_input, tf.math.argmax(self._model.get_reconstruction(), 2))
        accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

        tf.summary.scalar('Loss', loss)
        training_op = self._optimizer.minimize(loss)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            print("Training Start")
            #new_saver = tf.train.import_meta_graph('C:/ELEC 499/Try/Code_0228-59.meta')
            #new_saver.restore(sess, 'C:/ELEC 499/Try/Code_0228-59')
            for epoch in range(0, self._n_epochs):
                train_loss = 0
                # Batch-size schedule: the default batch size for the first
                # 20 epochs, then 100, then 200.
                if epoch <= 20:
                    batch_size = self._batch_size
                elif epoch <= 40:
                    batch_size = 100
                else:
                    batch_size = 200
                for X_batch, y_batch in self._dataset.shuffle_batch(
                        data['X_train'], data['y_train'], batch_size):
                    _, loss_batch = sess.run([training_op, loss],
                                             feed_dict={
                                                 self._model.X: X_batch,
                                                 self._model.Noise: 0.1774
                                             })
                    #print("self._model.get_reconstruction() is: ", self._model.get_reconstruction().eval(feed_dict={self._model.X: X_batch, self._model.Noise: 0.1774}))
                    train_loss += loss_batch

                summary = sess.run(summary_op,
                                   feed_dict={
                                       self._model.X: data['X_valid'],
                                       self._model.Noise: 0.1774
                                   })
                self._writer.add_summary(summary=summary, global_step=epoch)
                # TODO(ali): Clean this code for the LOVE OF GOD!!!
                print(
                    epoch, "Training Loss:", train_loss, "Validation Loss",
                    loss.eval(
                        feed_dict={
                            self._model.X: data['X_valid'],
                            self._model.Noise: 0.1774
                        }), "Accuracy", 100 * accuracy.eval(
                            feed_dict={
                                self._model.X: data['X_valid'],
                                self._model.Noise: 0.1774
                            }))
                # Raw string so the backslashes are not treated as escapes.
                saver.save(sess,
                           r"C:\ELEC 499\Try\ML-research",
                           global_step=epoch)
            # Testing
            # Testing: sweep SNR from 0 to 21 dB, feeding the noise standard
            # deviation that corresponds to each SNR value. The 22 original
            # test blocks differed only in these constants.
            noise_by_snr = [
                0.5, 0.4456, 0.397, 0.354, 0.315, 0.281, 0.2506, 0.2233,
                0.199, 0.1774, 0.158, 0.141, 0.1256, 0.112, 0.1, 0.089,
                0.079, 0.0706, 0.0629, 0.0561, 0.05, 0.04456
            ]
            for snr, noise in enumerate(noise_by_snr):
                acc = 0
                for X_test_batch, y_test_batch in self._dataset.shuffle_batch(
                        data['X_test'], data['y_test'], 100):
                    acc += 100 * accuracy.eval(feed_dict={
                        self._model.X: X_test_batch,
                        self._model.Noise: noise
                    })
                acc = 100 * acc / TEST_MESS
                print("SNR = %d test accuracy is: " % snr, 100000 * acc)
                np.save('SNR%d_Accuracy' % snr, 100000 * acc)

            # Constellation
            # for X_cons_batch, y_cons in self._dataset.shuffle_batch(data['X_cons'], data['X_cons'], 20):
            signal_cons = self._model.get_para().eval(
                feed_dict={
                    self._model.X: data['X_cons'],
                    self._model.Noise: 0.5
                })
            np.save('Signal_Constellation', signal_cons)
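The hard-coded noise values in the SNR sweep match sigma = 0.5 * 10**(-SNR/20) at the printed precision (0.5 at 0 dB, 0.1774 at 9 dB, 0.04456 at 21 dB), so the table appears to be a noise-standard-deviation schedule derived from the SNR in dB. A short sketch that regenerates it under that assumption; verify against the channel model before relying on it:

import numpy as np

for snr_db in range(22):
    # Assumed relationship inferred from the constants above.
    sigma = 0.5 * 10 ** (-snr_db / 20.0)
    print(snr_db, np.round(sigma, 4))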
Example #13
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._learning_rate = tf.placeholder(tf.float32,
                                             shape=[],
                                             name="learn_rate")

        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=self._learning_rate,
            beta1=0.9,
            beta2=0.999,
            epsilon=1e-08,
            use_locking=False,
            name='Adam')
        self._writer = tf.summary.FileWriter('./summary')

        # Call the graph-building accessors once and reuse the results.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        #self.output = tf.concat([self.out_real, self.out_image], axis=1)

        transmit = self._model.get_Transmit()
        self.Trans_real = transmit[0]
        self.Trans_image = transmit[1]

        self.loss_real = tf.math.subtract(self.Trans_real, self.out_real)
        self.loss_image = tf.math.subtract(self.Trans_image, self.out_image)
        #self.Trans = tf.concat([self.Trans_real, self.Trans_image], axis=1)

    def train_model(self):
        data = self._dataset.load_data()
        # loss function
        loss = tf.reduce_mean(
            tf.math.add(tf.math.square(self.loss_real),
                        tf.math.square(self.loss_image)))

        # Q function
        x_mag = tf.reduce_mean(
            tf.math.add(tf.math.square(self.Trans_real),
                        tf.math.square(self.Trans_image)))
        Q = tf.math.divide(x_mag, loss)

        # loss = tf.reduce_sum(tf.reduce_mean(tf.square(self.Trans-self.output), axis=0))
        # # Q function
        # x_mag = tf.reduce_sum(tf.reduce_mean(tf.square(self.Trans), axis=0))
        # Q = tf.math.divide(x_mag, loss)

        tf.summary.scalar('Loss', loss)
        training_op = self._optimizer.minimize(loss)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        #        K.clear_session()
        tf_config = tf.ConfigProto(inter_op_parallelism_threads=1,
                                   intra_op_parallelism_threads=1)
        #sess = tf.Session()
        with tf.Session(config=tf_config) as sess:
            init.run(feed_dict={self._model.mask: 0})
            print("Training Start")
            # variables_names = [v.name for v in tf.trainable_variables()]
            # values = sess.run(variables_names)
            # for k, v in zip(variables_names, values):
            #     print("Variable: ", k)
            #     print("Shape: ", v.shape)
            #     print(v)

            # tvars = tf.trainable_variables()
            # tvars_vals = sess.run(tvars)
            #
            # for var, val in zip(tvars, tvars_vals):
            #     print(var.name, val)

            # w1 = self._model.get_w1().eval(feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']})
            # print("w1 is:", w1[0,1:100])

            # real = self._model.get_middle_para()[0].eval(
            #     feed_dict={self._model.X_real: data['X_train_real'], self._model.X_image: data['X_train_image'],
            #                self._model.y_real: data['y_train_real'], self._model.y_image: data['y_train_image']})
            #
            # # print("predict:", self._model.get_middle_para()[1].eval(
            # #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            # #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))
            # image = self._model.get_middle_para()[1].eval(
            #     feed_dict={self._model.X_real: data['X_train_real'], self._model.X_image: data['X_train_image'],
            #                self._model.y_real: data['y_train_real'], self._model.y_image: data['y_train_image']})
            # print("real shape is", real.shape)
            #
            # # np.savetxt('data_real_x.csv', real, delimiter=',')
            # # np.savetxt('data_image_x.csv', image, delimiter=',')
            # plt.scatter(real[1, :1024], image[1, :1024])
            # plt.show()

            # real = self._model.get_middle_para()[0].eval(
            #     feed_dict={self._model.X_real: data['X_test_real_2'], self._model.X_image: data['X_test_image_2'],
            #                self._model.y_real: data['y_test_real_2'], self._model.y_image: data['y_test_image_2']})
            #
            # # print("predict:", self._model.get_middle_para()[1].eval(
            # #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            # #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))
            # image = self._model.get_middle_para()[1].eval(
            #     feed_dict={self._model.X_real: data['X_test_real_2'], self._model.X_image: data['X_test_image_2'],
            #                self._model.y_real: data['y_test_real_2'], self._model.y_image: data['y_test_image_2']})
            # print("real shape is", real.shape)
            #
            # # np.savetxt('data_real_y.csv', real, delimiter=',')
            # # np.savetxt('data_image_y.csv', image, delimiter=',')
            # plt.scatter(real[1, :1024], image[1, :1024])
            # plt.show()
            #
            #saver.restore(sess, "/data/temp/checkpoint/model.ckpt")
            #print("Model restored.")
            sess.graph.finalize()
            for epoch in range(0, self._n_epochs):
                train_loss = 0
                mask = 0  #2*np.floor(epoch/50)
                # Learning-rate schedule hook; both epoch ranges of the
                # original check used the same constant value.
                learning_rate = 7e-4
                for X_batch_real, y_batch_real, X_batch_image, y_batch_image in self._dataset.shuffle_batch(
                        data['X_train_real'], data['y_train_real'],
                        data['X_train_image'], data['y_train_image'],
                        self._batch_size):
                    _, loss_batch = sess.run(
                        [training_op, loss],
                        feed_dict={
                            self._learning_rate: learning_rate,
                            self._model.X_real_in: X_batch_real,
                            self._model.X_image_in: X_batch_image,
                            self._model.y_real: y_batch_real,
                            self._model.y_image: y_batch_image,
                            self._model.mask: mask
                        })
                    train_loss += loss_batch

                summary = sess.run(summary_op,
                                   feed_dict={
                                       self._learning_rate: learning_rate,
                                       self._model.X_real_in: X_batch_real,
                                       self._model.X_image_in: X_batch_image,
                                       self._model.y_real: y_batch_real,
                                       self._model.y_image: y_batch_image,
                                       self._model.mask: mask
                                   })
                self._writer.add_summary(summary=summary, global_step=epoch)
                print(
                    epoch, "Training Loss:", train_loss, "Validation Loss",
                    loss.eval(
                        feed_dict={
                            self._learning_rate: learning_rate,
                            self._model.X_real_in: data['X_test_real_1'],
                            self._model.X_image_in: data['X_test_image_1'],
                            self._model.y_real: data['y_test_real_1'],
                            self._model.y_image: data['y_test_image_1'],
                            self._model.mask: mask
                        }), "Q factor:",
                    Q.eval(
                        feed_dict={
                            self._learning_rate: learning_rate,
                            self._model.X_real_in: data['X_test_real_1'],
                            self._model.X_image_in: data['X_test_image_1'],
                            self._model.y_real: data['y_test_real_1'],
                            self._model.y_image: data['y_test_image_1'],
                            self._model.mask: mask
                        }))

                if epoch % 50 == 0:
                    save_path = saver.save(sess,
                                           "/data/temp/checkpoint/model.ckpt")
                    print("Model saved")

            print('Final Mask', mask)
            # Power
            real = self._model.get_reconstruction()[0].eval(
                feed_dict={
                    self._learning_rate: learning_rate,
                    self._model.X_real_in: data['X_test_real_1'],
                    self._model.X_image_in: data['X_test_image_1'],
                    self._model.y_real: data['y_test_real_1'],
                    self._model.y_image: data['y_test_image_1'],
                    self._model.mask: mask
                })

            print(
                "power = 1:",
                loss.eval(
                    feed_dict={
                        self._learning_rate: learning_rate,
                        self._model.X_real_in: data['X_test_real_1'],
                        self._model.X_image_in: data['X_test_image_1'],
                        self._model.y_real: data['y_test_real_1'],
                        self._model.y_image: data['y_test_image_1'],
                        self._model.mask: mask
                    }))
            image = self._model.get_reconstruction()[1].eval(
                feed_dict={
                    self._learning_rate: learning_rate,
                    self._model.X_real_in: data['X_test_real_1'],
                    self._model.X_image_in: data['X_test_image_1'],
                    self._model.y_real: data['y_test_real_1'],
                    self._model.y_image: data['y_test_image_1'],
                    self._model.mask: mask
                })

            np.savetxt('out_real.csv', real, delimiter=',')
            np.savetxt('out_image.csv', image, delimiter=',')
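Example #13 saves a checkpoint every 50 epochs and carries a commented-out restore. The Saver save/restore round trip it relies on looks like this in isolation (a toy variable and a /tmp path stand in for the real model and checkpoint directory):

import tensorflow as tf

x = tf.Variable(3.0, name="x")
saver = tf.train.Saver()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    path = saver.save(sess, "/tmp/model.ckpt")  # as in the epoch loop above

with tf.Session() as sess:
    saver.restore(sess, path)  # restored variables need no initializer
    print(sess.run(x))         # 3.0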
Example #14
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        self._learning_rate = tf.placeholder(tf.float32,
                                             shape=[],
                                             name="learn_rate")

        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=self._learning_rate)
        #AdaBoundOptimizer(learning_rate=0.01, final_lr=0.01, beta1=0.9, beta2=0.999, amsbound=False)
        self._writer = tf.summary.FileWriter('./summary')

        # Call the graph-building accessor once and reuse the results.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)
        print("self._model.y_real", self._model.y_real.shape)
        print("self.out_real", self.out_real.shape)

    def train_model(self):
        data = self._dataset.load_data()
        # loss function
        loss = tf.reduce_sum(
            tf.math.add(tf.math.square(self.loss_real),
                        tf.math.square(self.loss_image)))

        # Q function
        x_mag = tf.reduce_sum(
            tf.math.add(tf.math.square(self._model.y_real),
                        tf.math.square(self._model.y_image)))
        Q = tf.math.divide(x_mag, loss)

        tf.summary.scalar('Loss', loss)
        training_op = self._optimizer.minimize(loss)
        saver = tf.train.Saver()
        summary_op = tf.summary.merge_all()

        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            print("Training Start")
            # variables_names = [v.name for v in tf.trainable_variables()]
            # values = sess.run(variables_names)
            # for k, v in zip(variables_names, values):
            #     print("Variable: ", k)
            #     print("Shape: ", v.shape)
            #     print(v)

            # tvars = tf.trainable_variables()
            # tvars_vals = sess.run(tvars)
            #
            # for var, val in zip(tvars, tvars_vals):
            #     print(var.name, val)

            # w1 = self._model.get_w1().eval(feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']})
            # print("w1 is:", w1[0,1:100])

            # real = self._model.get_middle_para()[0].eval(
            #     feed_dict={self._model.X_real: data['X_train_real'], self._model.X_image: data['X_train_image'],
            #                self._model.y_real: data['y_train_real'], self._model.y_image: data['y_train_image']})
            #
            # # print("predict:", self._model.get_middle_para()[1].eval(
            # #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            # #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))
            # image = self._model.get_middle_para()[1].eval(
            #     feed_dict={self._model.X_real: data['X_train_real'], self._model.X_image: data['X_train_image'],
            #                self._model.y_real: data['y_train_real'], self._model.y_image: data['y_train_image']})
            # print("real shape is", real.shape)
            #
            # # np.savetxt('data_real_x.csv', real, delimiter=',')
            # # np.savetxt('data_image_x.csv', image, delimiter=',')
            # plt.scatter(real[1, :1024], image[1, :1024])
            # plt.show()

            real = self._model.get_middle_para()[0].eval(
                feed_dict={
                    self._model.X_real: data['X_train_real'],
                    self._model.X_image: data['X_train_image'],
                    self._model.y_real: data['y_train_real'],
                    self._model.y_image: data['y_train_image']
                })

            # print("predict:", self._model.get_middle_para()[1].eval(
            #     feed_dict={self._model.X_real: data['X_test_real_1'], self._model.X_image: data['X_test_image_1'],
            #                self._model.y_real: data['y_test_real_1'], self._model.y_image: data['y_test_image_1']}))
            image = self._model.get_middle_para()[1].eval(
                feed_dict={
                    self._model.X_real: data['X_train_real'],
                    self._model.X_image: data['X_train_image'],
                    self._model.y_real: data['y_train_real'],
                    self._model.y_image: data['y_train_image']
                })
            print("real shape is", real.shape)

            # Persist the intermediate outputs and scatter-plot the first example's
            # 1024 symbols (real vs. imaginary) as a constellation diagram.
            np.savetxt('data_real_y.csv', real, delimiter=',')
            np.savetxt('data_image_y.csv', image, delimiter=',')
            plt.scatter(real[0, :1024], image[0, :1024])
            plt.show()
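
A minimal standalone sketch (not part of the original example) for re-plotting the saved constellation, assuming the two CSV files written above exist on disk with one example per row:

import numpy as np
import matplotlib.pyplot as plt

# Reload the intermediate outputs written by the trainer above.
real = np.loadtxt('data_real_y.csv', delimiter=',')
image = np.loadtxt('data_image_y.csv', delimiter=',')

# Scatter the first example's 1024 complex symbols as a constellation diagram.
plt.scatter(real[0, :1024], image[0, :1024], s=2)
plt.xlabel('Real part')
plt.ylabel('Imaginary part')
plt.title('Learned constellation (first training example)')
plt.show()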
Example #15
class Trainer(object):
    def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params, channel_params,
                 decoder_params, decision_params, optimizer_params):
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model(dataset_params=dataset_params, encoder_params=encoder_params,
                            channel_params=channel_params, decoder_params=decoder_params,
                            decision_params=decision_params)
        # Tried GradientDescentOptimizer, but it converged poorly; the paper describes
        # SGD with the Adam optimizer, so mini-batch SGD driven by Adam is used here.
        self._optimizer = tf.train.AdamOptimizer(learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

    def train_model(self):
        data = self._dataset.load_data()
        # loss function: cross-entropy between the transmitted message (index 6 along
        # the group axis of X) and the decoder's reconstruction; 1e-8 guards log(0).
        self._decoding_mess = tf.slice(self._model.X, [0, 6, 0], [-1, 1, -1])
        loss = tf.reduce_mean(-tf.reduce_sum(self._decoding_mess * tf.log(self._model.get_reconstruction() + 1e-8), 2))
        # Alternative: loss = tf.reduce_mean(tf.keras.backend.categorical_crossentropy(self._model.X, self._model.get_reconstruction()))

        # A message counts as correctly decoded when the argmax of the reconstruction
        # matches the argmax of the transmitted one-hot message.
        X_batch_input = tf.math.argmax(self._decoding_mess, 2)
        correct_predict = tf.equal(X_batch_input, tf.math.argmax(self._model.get_reconstruction(), 2))
        accuracy = tf.reduce_mean(tf.cast(correct_predict, tf.float32))

        tf.summary.scalar('Loss', loss)
        training_op = self._optimizer.minimize(loss)

        summary_op = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        with tf.Session() as sess:
            init.run()
            print("Training Start")
            X_train_batch = np.zeros((TRAIN_MESS, group_num, 256))
            for epoch in range(self._n_epochs):
                train_loss = 0
                # Batch-size schedule: the configured batch size for the first 20
                # epochs, then 100, then 200 for the remaining epochs.
                if epoch <= 20:
                    batch_size = self._batch_size
                elif epoch <= 40:
                    batch_size = 100
                else:
                    batch_size = 200

                for X_batch, y_batch in self._dataset.shuffle_batch(data['X_train'], data['y_train'], batch_size):
                    # Pack group_num consecutive messages into each buffer row
                    # (a sliding window; the -12 keeps the window inside the batch).
                    for i in range(batch_size - 12):
                        k = i
                        for j in range(group_num):
                            X_train_batch[i, j, :] = X_batch[k, :, :]
                            k = k + 1

                    _, loss_batch = sess.run([training_op, loss],
                                             feed_dict={self._model.X: X_train_batch, self._model.Noise: 0.366})
                    train_loss += loss_batch

                summary = sess.run(summary_op, feed_dict={self._model.X: data['X_valid'], self._model.Noise: 0.366})
                self._writer.add_summary(summary=summary, global_step=epoch)
                print(epoch, "Training Loss:", train_loss, "Validation Loss:",
                      loss.eval(feed_dict={self._model.X: data['X_valid'], self._model.Noise: 0.366}), "Accuracy:",
                      100 * accuracy.eval(feed_dict={self._model.X: data['X_valid'], self._model.Noise: 0.366}))

            # Testing: sweep the SNR (dB) values; each SNR maps to the noise standard
            # deviation fed to the channel through the Noise placeholder.
            snr_noise = [(0, 0.5), (1, 0.483), (2, 0.4665), (3, 0.45), (4, 0.435),
                         (5, 0.42), (6, 0.406), (7, 0.392), (8, 0.3789), (9, 0.366),
                         (10, 0.353), (11, 0.3415), (12, 0.3299), (13, 0.3186)]
            for snr, noise in snr_noise:
                acc_sum = 0
                for X_test_batch, y_test_batch in self._dataset.shuffle_batch(data['X_test'], data['y_test'], 100):
                    acc_sum += accuracy.eval(feed_dict={self._model.X: X_test_batch, self._model.Noise: noise})
                # TEST_MESS / 100 batches of size 100, so this is the mean accuracy in percent.
                test_accuracy = 100 * acc_sum * 100 / TEST_MESS
                print("SNR =", snr, "test accuracy is:", test_accuracy)

            # Constellation: capture the encoder's channel symbols for offline plotting.
            # Only the last batch's output survives the loop and gets saved.
            for X_cons_batch, y_cons in self._dataset.shuffle_batch(data['X_cons'], data['X_cons'], 20):
                signal_cons = self._model.get_para().eval(feed_dict={self._model.X: X_cons_batch, self._model.Noise: 0.5})
            np.save('Signal_Constellation', signal_cons)
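
A hedged follow-up sketch (not from the original example): plotting the accuracy-vs-SNR curve gathered by the sweep above. Here `eval_accuracy` is a hypothetical helper that wraps the batched accuracy evaluation for one noise level, and `snr_noise` is the (SNR, noise-std) table hard-coded in the test loop:

import matplotlib.pyplot as plt

def plot_snr_accuracy(snr_noise, eval_accuracy):
    # snr_noise: list of (snr_db, noise_std) pairs, as hard-coded above.
    # eval_accuracy(noise_std): hypothetical callable returning mean test accuracy in percent.
    snrs = [snr for snr, _ in snr_noise]
    accs = [eval_accuracy(noise) for _, noise in snr_noise]
    plt.plot(snrs, accs, marker='o')
    plt.xlabel('SNR (dB)')
    plt.ylabel('Test accuracy (%)')
    plt.title('Decoder accuracy vs. channel SNR')
    plt.show()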