Beispiel #1
0
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        """Build the training graph: data pipeline, model, Adam optimizer, losses.

        Args:
            n_epochs: number of epochs the training loop will run.
            tr_batch_size: mini-batch size used during training.
            optimizer_params: optimizer configuration; unused in this variant —
                the learning rate is fed at run time through a placeholder
                instead. Kept for signature compatibility with callers.
        """
        # Learning rate is a scalar placeholder so it can be scheduled
        # externally (fed on each session run).
        self._learning_rate = tf.placeholder(tf.float32,
                                             shape=[],
                                             name="learn_rate")

        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=self._learning_rate,
            beta1=0.9,
            beta2=0.999,
            epsilon=1e-08,
            use_locking=False,
            name='Adam')
        self._writer = tf.summary.FileWriter('./summary')

        # Call each model getter exactly once and index the result. The
        # original called get_reconstruction()/get_Transmit() twice each,
        # which duplicates work and — if the getters build ops — would add
        # duplicate nodes to the TF1 graph.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]

        transmitted = self._model.get_Transmit()
        self.Trans_real = transmitted[0]
        self.Trans_image = transmitted[1]

        # Element-wise error between transmitted and reconstructed signals,
        # split into real and imaginary components.
        self.loss_real = tf.math.subtract(self.Trans_real, self.out_real)
        self.loss_image = tf.math.subtract(self.Trans_image, self.out_image)
Beispiel #2
0
 def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params, channel_params, decoder_params, decision_params, optimizer_params):
     """Wire up the trainer: MNIST dataset, full model, Adam optimizer, writer.

     All *_params arguments are forwarded to the Model; optimizer_params
     must provide an 'lr' entry for the learning rate.
     """
     # Training-loop hyper-parameters.
     self._n_epochs = n_epochs
     self._batch_size = tr_batch_size
     # Data source and the network under training.
     self._dataset = DatasetMNIST(val_size=10000)
     self._model = Model(
         dataset_params=dataset_params,
         encoder_params=encoder_params,
         channel_params=channel_params,
         decoder_params=decoder_params,
         decision_params=decision_params,
     )
     # NOTE: plain GradientDescentOptimizer converged poorly here; the
     # paper's "SGD with Adam" is read as mini-batch SGD driven by the
     # Adam update rule, hence AdamOptimizer.
     self._optimizer = tf.train.AdamOptimizer(learning_rate=optimizer_params['lr'])
     # TensorBoard summaries are written under ./summary.
     self._writer = tf.summary.FileWriter('./summary')
Beispiel #3
0
 def __init__(self, n_epochs, tr_batch_size, optimizer_params):
     """Assemble training components: dataset, model, Adam optimizer, writer.

     optimizer_params must contain an 'lr' entry (the learning rate).
     """
     # Loop hyper-parameters.
     self._n_epochs = n_epochs
     self._batch_size = tr_batch_size
     # MNIST with a 10k validation split, and the default model.
     self._dataset = DatasetMNIST(val_size=10000)
     self._model = Model()
     # Adam with the externally supplied learning rate.
     step_size = optimizer_params['lr']
     self._optimizer = tf.train.AdamOptimizer(learning_rate=step_size)
     # TensorBoard summaries go to ./summary.
     self._writer = tf.summary.FileWriter('./summary')
Beispiel #4
0
 def __init__(self, n_epochs, tr_batch_size, dataset_params, encoder_params,
              decoder_params, optimizer_params):
     """Set up training: dataset, encoder/decoder model, SGD optimizer, writer.

     dataset_params/encoder_params/decoder_params are forwarded to Model;
     optimizer_params must provide an 'lr' entry.
     """
     # Training-loop hyper-parameters.
     self._n_epochs = n_epochs
     self._batch_size = tr_batch_size
     # MNIST with a 10k validation split.
     self._dataset = DatasetMNIST(val_size=10000)
     self._model = Model(
         dataset_params=dataset_params,
         encoder_params=encoder_params,
         decoder_params=decoder_params,
     )
     # Vanilla gradient descent with the configured step size.
     learning_rate = optimizer_params['lr']
     self._optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
     # TensorBoard summaries are written under ./summary.
     self._writer = tf.summary.FileWriter('./summary')
Beispiel #5
0
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        """Build the trainer: dataset, model, Adam optimizer, and loss tensors.

        Args:
            n_epochs: number of training epochs to run.
            tr_batch_size: mini-batch size used during training.
            optimizer_params: dict of optimizer settings; 'lr' supplies the
                Adam learning rate.
        """
        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=optimizer_params['lr'])
        self._writer = tf.summary.FileWriter('./summary')

        # Call get_reconstruction() once and index the result. The original
        # invoked it twice (once per component), which duplicates work and —
        # if the getter builds ops — would add duplicate nodes to the graph.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        # Element-wise error between model targets and reconstructions,
        # split into real and imaginary components.
        self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)
Beispiel #6
0
    def __init__(self, n_epochs, tr_batch_size, optimizer_params):
        """Build the trainer with a run-time-fed learning rate.

        Args:
            n_epochs: number of training epochs to run.
            tr_batch_size: mini-batch size used during training.
            optimizer_params: optimizer configuration; unused in this variant —
                the learning rate is fed through a placeholder instead. Kept
                for signature compatibility with callers.
        """
        # Scalar placeholder so the learning rate can be scheduled externally
        # (fed on each session run).
        self._learning_rate = tf.placeholder(tf.float32,
                                             shape=[],
                                             name="learn_rate")

        self._n_epochs = n_epochs
        self._batch_size = tr_batch_size
        self._dataset = DatasetMNIST(val_size=10000)
        self._model = Model()
        self._optimizer = tf.train.AdamOptimizer(
            learning_rate=self._learning_rate)
        self._writer = tf.summary.FileWriter('./summary')

        # Call get_reconstruction() once and index the result. The original
        # invoked it twice, which duplicates work and — if the getter builds
        # ops — would add duplicate nodes to the TF1 graph.
        reconstruction = self._model.get_reconstruction()
        self.out_real = reconstruction[0]
        self.out_image = reconstruction[1]
        # Element-wise error between model targets and reconstructions.
        self.loss_real = tf.math.subtract(self._model.y_real, self.out_real)
        self.loss_image = tf.math.subtract(self._model.y_image, self.out_image)
        # Debug output left in place to preserve observable behavior;
        # consider replacing with logging once shapes are confirmed.
        print("self._model.y_real", self._model.y_real.shape)
        print("self.out_real", self.out_real.shape)
Beispiel #7
0
 def __init__(self, n_epochs, tr_batch_size, optimizer_params):
     self._n_epochs = n_epochs
     self._batch_size = tr_batch_size
     self._dataset = DatasetMNIST(val_size=10000)
     self._model = Model()