Example 1
    def __init__(self, height, width, channel, num_class,
                 ksize, radix=4, kpaths=4):
        super(Neuralnet, self).__init__()

        # Input geometry, class count, and block hyperparameters
        # (kernel size, radix, and number of cardinal paths).
        self.height, self.width, self.channel, self.num_class = height, width, channel, num_class
        self.ksize, self.radix, self.kpaths = ksize, radix, kpaths
        self.customlayers = lay.Layers()

        # Compile __call__ as a tf.function so inference runs in graph mode.
        self.forward = tf.function(self.__call__)
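A minimal usage sketch for the constructor above. The super() call suggests a subclassed Keras-style model named Neuralnet; the import, the input shape, and the instantiation below are assumptions for illustration, not part of the source.

import tensorflow as tf

# Hypothetical instantiation; only __init__ is shown above, so the class
# body, its module, and the `lay` helper module are assumed to exist.
model = Neuralnet(height=32, width=32, channel=3, num_class=10,
                  ksize=3, radix=4, kpaths=4)

# __init__ wraps __call__ in tf.function, so this call runs as a traced graph.
logits = model.forward(tf.zeros([1, 32, 32, 3]))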
Example 2
    def __init__(self, height, width, channel, ksize, zdim,
                 learning_rate=1e-3, path='', verbose=True):

        print("\nInitializing Neural Network...")
        self.height, self.width, self.channel, self.ksize, self.zdim = \
            height, width, channel, ksize, zdim
        self.learning_rate = learning_rate
        self.path_ckpt = path

        # Graph inputs: image batch, latent code, batch size, and train-mode flag.
        self.x = tf.compat.v1.placeholder(tf.float32,
            [None, self.height, self.width, self.channel], name="x")
        self.z = tf.compat.v1.placeholder(tf.float32, [None, self.zdim], name="z")
        self.batch_size = tf.compat.v1.placeholder(tf.int32, shape=[], name="batch_size")
        self.training = tf.compat.v1.placeholder(tf.bool, shape=[], name="training")

        self.layer = lay.Layers()

        self.variables, self.losses = {}, {}
        self.__build_model(x_real=self.x,
                           z=self.z,
                           ksize=self.ksize,
                           verbose=verbose)
        self.__build_loss()

        # One Adam optimizer per sub-network; the discriminator uses a
        # five-times smaller learning rate, and each minimize() is gated
        # on that sub-network's update ops and restricted to its parameters.
        with tf.control_dependencies(self.variables['ops_d']):
            self.optimizer_d = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate/5, name='Adam_d').minimize(
                self.losses['loss_d'], var_list=self.variables['params_d'])

        with tf.control_dependencies(self.variables['ops_g']):
            self.optimizer_g = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate, name='Adam_g').minimize(
                self.losses['loss_g'], var_list=self.variables['params_g'])

        with tf.control_dependencies(self.variables['ops_e']):
            self.optimizer_e = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate, name='Adam_e').minimize(
                self.losses['loss_e'], var_list=self.variables['params_e'])

        tf.compat.v1.summary.scalar('f-AnoGAN/mean_real',
                                    self.losses['mean_real'])
        tf.compat.v1.summary.scalar('f-AnoGAN/mean_fake',
                                    self.losses['mean_fake'])
        tf.compat.v1.summary.scalar('f-AnoGAN/mean_izi', self.losses['izi'])
        tf.compat.v1.summary.scalar('f-AnoGAN/mean_ziz', self.losses['ziz'])
        tf.compat.v1.summary.scalar('f-AnoGAN/loss_d', self.losses['loss_d'])
        tf.compat.v1.summary.scalar('f-AnoGAN/loss_g', self.losses['loss_g'])
        tf.compat.v1.summary.scalar('f-AnoGAN/loss_e', self.losses['loss_e'])
        self.summaries = tf.compat.v1.summary.merge_all()

        self.__init_session(path=self.path_ckpt)
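A hedged training-step sketch for the f-AnoGAN graph above, written TF1-style because the constructor builds placeholders and fetch targets. The class name NeuralNetwork and the assumption that __init_session stores a tf.compat.v1.Session on self.sess are both guesses from the code's conventions.

import numpy as np
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

# Hypothetical name: the snippet above only defines __init__.
nn = NeuralNetwork(height=28, width=28, channel=1, ksize=3, zdim=128,
                   learning_rate=1e-3, path='./Checkpoint')

x_batch = np.zeros((16, 28, 28, 1), dtype=np.float32)
z_batch = np.random.normal(size=(16, 128)).astype(np.float32)

# Run one discriminator update; assumes __init_session set nn.sess.
_, loss_d = nn.sess.run(
    [nn.optimizer_d, nn.losses['loss_d']],
    feed_dict={nn.x: x_batch, nn.z: z_batch,
               nn.batch_size: 16, nn.training: True})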
Example 3
    def __init__(self, height, width, channel, num_class, ksize,
                 learning_rate=1e-3, ckpt_dir='./Checkpoint'):

        print("\nInitializing Short-ResNet...")
        self.height, self.width, self.channel, self.num_class = height, width, channel, num_class
        self.ksize, self.learning_rate = ksize, learning_rate
        self.ckpt_dir = ckpt_dir

        self.customlayers = lay.Layers()
        # Build the network once by pushing a dummy zero image through it.
        self.model(tf.zeros([1, self.height, self.width, self.channel]), verbose=True)

        self.optimizer = tf.optimizers.Adam(self.learning_rate)

        # TensorBoard writer aimed at the checkpoint directory.
        self.summary_writer = tf.summary.create_file_writer(self.ckpt_dir)
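Because this constructor is eager/TF2-style, a training step would typically use tf.GradientTape. The sketch below is an assumption-laden outline: the return value of self.model and the location of the trainable parameters are not shown in the source.

import tensorflow as tf

def train_step(net, x, y_onehot):
    # `net` is an instance of the class whose __init__ appears above.
    with tf.GradientTape() as tape:
        logits = net.model(x, verbose=False)  # assumed to return class logits
        loss = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits(labels=y_onehot,
                                                    logits=logits))
    params = net.customlayers.parameters  # hypothetical variable registry
    grads = tape.gradient(loss, params)
    net.optimizer.apply_gradients(zip(grads, params))
    return loss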
Example 4
    def __init__(self, height, width, channel, ksize,
                 learning_rate=1e-3, path='', verbose=True):

        print("\nInitializing Neural Network...")
        self.height, self.width, self.channel, self.ksize = \
            height, width, channel, ksize
        self.learning_rate = learning_rate
        self.path_ckpt = path

        # Graph inputs: image batch, batch size, and train-mode flag.
        self.x = tf.compat.v1.placeholder(tf.float32,
            [None, self.height, self.width, self.channel], name="x")
        self.batch_size = tf.compat.v1.placeholder(tf.int32, shape=[], name="batch_size")
        self.training = tf.compat.v1.placeholder(tf.bool, shape=[], name="training")

        self.layer = lay.Layers()

        self.variables, self.losses = {}, {}
        self.__build_model(ksize=self.ksize, verbose=verbose)
        self.__build_loss()

        # Separate Adam optimizers for the discriminator and generator,
        # each gated on its own update ops and parameter list.
        with tf.control_dependencies(self.variables['ops_d']):
            self.optimizer_d = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate, name='Adam_d').minimize(
                self.losses['loss_d'], var_list=self.variables['params_d'])

        with tf.control_dependencies(self.variables['ops_g']):
            self.optimizer_g = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate, name='Adam_g').minimize(
                self.losses['loss_g'], var_list=self.variables['params_g'])

        # L_D = ||X - D(X)||_1 - ||G(X) - D(G(X))||_1
        tf.compat.v1.summary.scalar('ADAE/D/loss_d_term1',
            tf.compat.v1.reduce_mean(self.losses['loss_d_term1']))
        tf.compat.v1.summary.scalar('ADAE/D/loss_d_term2',
            tf.compat.v1.reduce_mean(self.losses['loss_d_term2']))
        tf.compat.v1.summary.scalar('ADAE/D/loss_d', self.losses['loss_d'])

        # L_G = ||X - G(X)||_1 + ||G(X) - D(G(X))||_1
        tf.compat.v1.summary.scalar('ADAE/G/loss_g_term1',
            tf.compat.v1.reduce_mean(self.losses['loss_g_term1']))
        tf.compat.v1.summary.scalar('ADAE/G/loss_g_term2',
            tf.compat.v1.reduce_mean(self.losses['loss_g_term2']))
        tf.compat.v1.summary.scalar('ADAE/G/loss_g', self.losses['loss_g'])

        self.summaries = tf.compat.v1.summary.merge_all()

        self.__init_session(path=self.path_ckpt)
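The two loss comments above pin down the ADAE objective exactly, so it can be restated as standalone TF ops. A minimal sketch, assuming x, g_x = G(x), d_x = D(x), and d_g_x = D(G(x)) are NHWC tensors produced by the (unshown) __build_model:

import tensorflow as tf

def adae_losses(x, g_x, d_x, d_g_x):
    # Per-sample L1 norm over the height, width, and channel axes.
    def l1(a, b):
        return tf.reduce_sum(tf.abs(a - b), axis=[1, 2, 3])

    # L_D = ||X - D(X)||_1 - ||G(X) - D(G(X))||_1
    loss_d = tf.reduce_mean(l1(x, d_x) - l1(g_x, d_g_x))
    # L_G = ||X - G(X)||_1 + ||G(X) - D(G(X))||_1
    loss_g = tf.reduce_mean(l1(x, g_x) + l1(g_x, d_g_x))
    return loss_d, loss_g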
Example 5
    def __init__(self, height, width, channel, ksize,
                 w_enc=1, w_con=50, w_adv=1,
                 learning_rate=1e-3, path='', verbose=True):

        print("\nInitializing Neural Network...")
        self.height, self.width, self.channel, self.ksize = height, width, channel, ksize
        self.w_enc, self.w_con, self.w_adv = w_enc, w_con, w_adv
        self.learning_rate = learning_rate
        self.path_ckpt = path

        # Graph inputs: image batch, batch size, and train-mode flag.
        self.x = tf.compat.v1.placeholder(
            tf.float32, [None, self.height, self.width, self.channel])
        self.batch_size = tf.compat.v1.placeholder(tf.int32, shape=[])
        self.training = tf.compat.v1.placeholder(tf.bool, shape=[])

        self.layer = lay.Layers()

        self.conv_shapes = []
        self.variables, self.losses = {}, {}
        self.__build_model(x_real=self.x, ksize=self.ksize, verbose=verbose)
        self.__build_loss()

        # Gate the optimizer on the graph's UPDATE_OPS (e.g., batch-norm
        # moving statistics) and minimize the combined GANomaly target.
        with tf.control_dependencies(
                tf.compat.v1.get_collection(
                    tf.compat.v1.GraphKeys.UPDATE_OPS)):
            self.optimizer = tf.compat.v1.train.AdamOptimizer(
                self.learning_rate).minimize(self.losses['target'])

        tf.compat.v1.summary.scalar('GANomaly/loss_enc',
                                    self.losses['mean_enc'])
        tf.compat.v1.summary.scalar('GANomaly/loss_con',
                                    self.losses['mean_con'])
        tf.compat.v1.summary.scalar('GANomaly/loss_adv',
                                    self.losses['mean_adv'])
        tf.compat.v1.summary.scalar('GANomaly/loss_target',
                                    self.losses['target'])
        self.summaries = tf.compat.v1.summary.merge_all()

        self.__init_session(path=self.path_ckpt)
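GANomaly's published objective is a weighted sum of encoding, contextual, and adversarial terms, which matches the w_enc/w_con/w_adv weights stored above. The sketch below reconstructs that sum under the assumption that __build_loss follows the paper; the per-sample argument tensors are hypothetical.

import tensorflow as tf

def ganomaly_target(loss_enc, loss_con, loss_adv,
                    w_enc=1, w_con=50, w_adv=1):
    # Hypothetical reconstruction: each argument is a per-sample loss tensor.
    mean_enc = tf.reduce_mean(loss_enc)  # latent-code (encoding) distance
    mean_con = tf.reduce_mean(loss_con)  # contextual / reconstruction distance
    mean_adv = tf.reduce_mean(loss_adv)  # adversarial feature-matching distance
    return w_enc * mean_enc + w_con * mean_con + w_adv * mean_adv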