def model(x, y_, reuse, is_train=False):
    W_init = tf.truncated_normal_initializer(stddev=5e-2)
    W_init2 = tf.truncated_normal_initializer(stddev=0.04)
    b_init2 = tf.constant_initializer(value=0.1)
    with tf.variable_scope("model", reuse=reuse):
        tl.layers.set_name_reuse(reuse)
        net = InputLayer(x, name='input')
        net = Conv2d(net,
                     32, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     padding='SAME',
                     W_init=W_init,
                     name='cnn1')
        net = Conv2d(net,
                     32, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     W_init=W_init,
                     name='cnn2',
                     padding="VALID")
        net = MaxPool2d(net, name='pool1', padding="VALID")
        # is_fix defaults to False here: TL collects the keep probability as a
        # placeholder in net.all_drop, which must be fed at run time (use
        # tl.utils.dict_to_one to disable dropout at test time); is_train=False
        # makes the layer a pass-through entirely.
        net = DropoutLayer(net, keep=0.75, is_train=is_train, name='drop1')

        net = Conv2d(net,
                     64, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     padding='SAME',
                     W_init=W_init,
                     name='cnn3')
        net = Conv2d(net,
                     64, (3, 3), (1, 1),
                     act=tf.nn.relu,
                     W_init=W_init,
                     name='cnn4',
                     padding="VALID")
        net = MaxPool2d(net, name='pool2', padding="VALID")
        net = DropoutLayer(net, keep=0.75, is_train=is_train, name='drop2')

        net = FlattenLayer(net, name='flatten')
        net = DenseLayer(net,
                         n_units=512,
                         act=tf.nn.relu,
                         W_init=W_init2,
                         b_init=b_init2,
                         name='d1relu')
        net = DenseLayer(net,
                         n_units=10,
                         act=tf.identity,
                         W_init=tf.truncated_normal_initializer(stddev=1 / 192.0),
                         name='output')  # output: (batch_size, 10)
        y = net.outputs

        loss = tl.cost.cross_entropy(y, y_, name='cost')

        # tf.argmax returns int64; cast to match int32 labels (assumes y_ is int32)
        correct_prediction = tf.equal(tf.cast(tf.argmax(y, 1), tf.int32), y_)
        acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        return net, loss, acc
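A minimal sketch of how a builder like this is usually driven, constructing the training and evaluation graphs over shared weights. The CIFAR-style 24x24x3 input shape, the int32 label placeholder, and the optimizer are illustrative assumptions, not part of the snippet above.

x = tf.placeholder(tf.float32, shape=[None, 24, 24, 3], name='x')
y_ = tf.placeholder(tf.int32, shape=[None], name='y_')
# The first call creates the variables; the second call reuses them, so the
# evaluation graph shares weights with the training graph.
net_train, cost, _ = model(x, y_, reuse=False, is_train=True)
net_test, cost_test, acc = model(x, y_, reuse=True, is_train=False)
train_op = tf.train.AdamOptimizer(1e-4).minimize(cost)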
Example #2
def model(x, is_train, reuse):
    with tf.variable_scope("STN", reuse=reuse):
        nin = InputLayer(x, name='in')
        ## 1. Localisation network
        # use MLP as the localisation net
        nt = FlattenLayer(nin, name='flatten')
        nt = DenseLayer(nt, n_units=20, act=tf.nn.tanh, name='dense1')
        nt = DropoutLayer(nt, keep=0.8, is_fix=True, is_train=is_train, name='drop1')
        # you can also use CNN instead for MLP as the localisation net
        # nt = Conv2d(nin, 16, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc1')
        # nt = Conv2d(nt, 8, (3, 3), (2, 2), act=tf.nn.relu, padding='SAME', name='tc2')
        ## 2. Spatial transformer module (sampler)
        n = SpatialTransformer2dAffineLayer(nin,
                                            nt,
                                            out_size=[40, 40],
                                            name='spatial')
        s = n  # keep the transformer output (the resampled 40x40 patch) for inspection
        ## 3. Classifier
        n = Conv2d(n,
                   16, (3, 3), (2, 2),
                   act=tf.nn.relu,
                   padding='SAME',
                   name='conv1')
        n = Conv2d(n,
                   16, (3, 3), (2, 2),
                   act=tf.nn.relu,
                   padding='SAME',
                   name='conv2')
        n = FlattenLayer(n, name='flatten2')
        n = DenseLayer(n, n_units=1024, act=tf.nn.relu, name='out1')
        n = DenseLayer(n, n_units=10, act=tf.identity, name='out2')
    return n, s
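Illustrative usage of the STN builder; the 40x40 single-channel input shape is an assumption. The second return value is the transformer's resampled patch, which is handy for visualising where the network learned to look.

x = tf.placeholder(tf.float32, shape=[None, 40, 40, 1], name='x')
net_train, _ = model(x, is_train=True, reuse=False)         # creates variables
net_test, net_trans = model(x, is_train=False, reuse=True)  # shares them
# patches = sess.run(net_trans.outputs, feed_dict={x: images})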
Example #3
    def _build_net(self, is_train=True, reuse=None):
        with tf.variable_scope(self.name, reuse=reuse):
            n = InputLayer(self.x / 255, name='in')

            n = Conv2d(n, 32, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c1/1')
            n = Conv2d(n, 32, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c1/2')
            n = MaxPool2d(n, (2, 2), (2, 2), 'VALID', name='max1')

            n = DropoutLayer(n,
                             0.75,
                             is_fix=True,
                             is_train=is_train,
                             name='drop1')

            n = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c2/1')
            n = Conv2d(n, 64, (3, 3), (1, 1), tf.nn.relu, "VALID", name='c2/2')
            n = MaxPool2d(n, (2, 2), (2, 2), 'VALID', name='max2')
            n = DropoutLayer(n,
                             0.75,
                             is_fix=True,
                             is_train=is_train,
                             name='drop2')

            n = FlattenLayer(n, name='f')
            n = DenseLayer(n, 512, tf.nn.relu, name='dense1')
            n = DropoutLayer(n,
                             0.5,
                             is_fix=True,
                             is_train=is_train,
                             name='drop3')
            n = DenseLayer(n, n_action, tf.nn.tanh, name='o')  # n_action (number of actions) is defined in the enclosing scope

        if is_train:
            self.n_train = n
        else:
            self.n_test = n
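Because _build_net stores its result on the instance rather than returning it, the owning class presumably calls it once per mode. A minimal sketch of that wiring (the placeholder and its shape are assumptions):

# Inside the owning class, something like:
# self.x = tf.placeholder(tf.float32, [None, height, width, channels])
self._build_net(is_train=True, reuse=False)   # creates variables -> self.n_train
self._build_net(is_train=False, reuse=True)   # reuses them       -> self.n_test
actions = self.n_test.outputs                 # tanh outputs in [-1, 1]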
Example #4
    def squeezenetv1(cls, x, end_with='output', is_train=False, reuse=None):
        with tf.compat.v1.variable_scope("squeezenetv1", reuse=reuse):
            with tf.compat.v1.variable_scope("input"):
                n = InputLayer(x)
                # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
                n = Conv2d(n,
                           64, (3, 3), (2, 2),
                           tf.nn.relu,
                           'SAME',
                           name='conv1')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire2"):
                n = Conv2d(n,
                           16, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            64, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            64, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire3"):
                n = Conv2d(n,
                           16, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            64, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            64, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire4"):
                n = Conv2d(n,
                           32, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            128, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            128, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire5"):
                n = Conv2d(n,
                           32, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            128, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            128, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
                n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire6"):
                n = Conv2d(n,
                           48, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            192, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            192, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire7"):
                n = Conv2d(n,
                           48, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            192, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            192, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire8"):
                n = Conv2d(n,
                           64, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            256, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            256, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("fire9"):
                n = Conv2d(n,
                           64, (1, 1), (1, 1),
                           tf.nn.relu,
                           'SAME',
                           name='squeeze1x1')
                n1 = Conv2d(n,
                            256, (1, 1), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand1x1')
                n2 = Conv2d(n,
                            256, (3, 3), (1, 1),
                            tf.nn.relu,
                            'SAME',
                            name='expand3x3')
                n = ConcatLayer([n1, n2], -1, name='concat')
            if end_with in n.outputs.name:
                return n

            with tf.compat.v1.variable_scope("output"):
                n = DropoutLayer(n,
                                 keep=0.5,
                                 is_fix=True,
                                 is_train=is_train,
                                 name='drop1')
                n = Conv2d(n,
                           1000, (1, 1), (1, 1),
                           padding='VALID',
                           name='conv10')  # 13, 13, 1000
                n = GlobalMeanPool2d(n)
            if end_with in n.outputs.name:
                return n

            raise ValueError("end_with must be one of: input, fire2, fire3 ... fire9, output")
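Because every stage compares end_with against the current tensor's name, the builder doubles as a feature extractor. An illustrative call, assuming this is a classmethod on a class here called SqueezeNetV1 and a 224x224 RGB input (both assumptions):

x = tf.compat.v1.placeholder(tf.float32, [None, 224, 224, 3])
logits = SqueezeNetV1.squeezenetv1(x, end_with='output', is_train=False)             # builds all variables
feats = SqueezeNetV1.squeezenetv1(x, end_with='fire5', is_train=False, reuse=True)   # shares them, stops after fire5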
    def __get_network__(self,
                        model_name,
                        encode_seqs,
                        reuse=False,
                        is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_in = InputLayer(inputs=encode_seqs, name="in_word_embed")
            '''
            net_in = ReshapeLayer(
                net_in,
                (-1, self.max_length, self.word_embedding_dim, 1),
                name="reshape"
            )
            '''

            filter_length = [2, 4, 8]
            n_filter = 600

            net_cnn_list = list()

            for fsz in filter_length:

                net_cnn = Conv1d(net_in,
                                 n_filter=n_filter,
                                 filter_size=fsz,
                                 stride=1,
                                 act=tf.nn.relu,
                                 name="cnn%d" % fsz)
                # Global max-pooling over the time axis: rewrite the layer's
                # outputs in place, (batch, length, n_filter) -> (batch, n_filter)
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs,
                                                axis=1,
                                                name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)
            '''
            net_cnn = Conv1d(net_in, 400, 8, act=tf.nn.relu, name="cnn_1")
            net_cnn = MaxPool1d(net_cnn, 2, 2, padding="valid", name="maxpool_1")

            net_cnn = Conv1d(net_cnn, 600, 4, act=tf.nn.relu, name="cnn_2")
            net_cnn = MaxPool1d(net_cnn, 2, 2, padding="valid", name="maxpool_2")

            net_cnn = Conv1d(net_cnn, 600, 2, act=tf.nn.relu, name="cnn_3")
            net_cnn = MaxPool1d(net_cnn, 2, 2, padding="valid", name="maxpool_3")

            net_cnn = FlattenLayer(net_cnn, name="flatten")
            '''
            '''
            net_cnn = Conv2d(net_in, 64, (8, 8), act=tf.nn.relu, name="cnn_1")
            net_cnn = MaxPool2d(net_cnn, (2, 2), padding="valid", name="maxpool_1")

            net_cnn = Conv2d(net_cnn, 32, (4, 4), act=tf.nn.relu, name="cnn_2")
            net_cnn = MaxPool2d(net_cnn, (2, 4), padding="valid", name="maxpool_2")

            net_cnn = Conv2d(net_cnn, 8, (2, 2), act=tf.nn.relu, name="cnn_3")
            net_cnn = MaxPool2d(net_cnn, (2, 2), padding="valid", name="maxpool_3")

            net_cnn = FlattenLayer(net_cnn, name="flatten")
            '''

            net_cnn = DropoutLayer(net_cnn,
                                   keep=0.5,
                                   is_fix=True,
                                   is_train=is_train,
                                   name='drop1')

            net_fc = DenseLayer(net_cnn,
                                n_units=400,
                                act=tf.nn.relu,
                                name="fc_1")

            net_fc = DropoutLayer(net_fc,
                                  keep=0.5,
                                  is_fix=True,
                                  is_train=is_train,
                                  name='drop2')

            net_fc = DenseLayer(net_fc,
                                n_units=100,
                                act=tf.nn.relu,
                                name="fc_2")

            net_fc = DropoutLayer(net_fc,
                                  keep=0.5,
                                  is_fix=True,
                                  is_train=is_train,
                                  name='drop3')

            net_fc = DenseLayer(net_fc,
                                n_units=self.number_of_seen_classes,
                                act=tf.nn.relu,
                                name="fc_3")

        return net_fc
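A minimal call-site sketch for this text-CNN builder, from inside the owning class; the placeholder shape and the model name are assumptions.

# encode_seqs = tf.placeholder(tf.float32, [batch_size, max_length, word_embedding_dim])
net_train = self.__get_network__("cls_cnn", encode_seqs, reuse=False, is_train=True)
net_test = self.__get_network__("cls_cnn", encode_seqs, reuse=True, is_train=False)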
    def __get_network__(self, model_name, encode_seqs, class_label_seqs, kg_vector, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_word_embed = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed"
            )

            net_class_label_embed = InputLayer(
                inputs=class_label_seqs,
                name="in_class_label_embed"
            )

            net_kg = InputLayer(
                inputs=kg_vector,
                name='in_kg'
            )

            net_kg = ReshapeLayer(
                net_kg,
                shape=(-1, self.kg_embedding_dim),
                name="reshape_kg_1"
            )

            net_kg = ReshapeLayer(
                net_kg,
                shape=(-1, self.max_length, self.kg_embedding_dim),
                name="reshape_kg_2"
            )

            if config.model == "vwvcvkg":
                # dbpedia and 20news
                net_in = ConcatLayer(
                    [net_word_embed, net_class_label_embed, net_kg],
                    concat_dim=-1,
                    name='concat_vw_vwc_vc'
                )
            elif config.model == "vwvc":
                net_in = ConcatLayer(
                    [net_word_embed, net_class_label_embed],
                    concat_dim=-1,
                    name='concat_vw_vc'
                )
            elif config.model == "vwvkg":
                net_in = ConcatLayer(
                    [net_word_embed, net_kg],
                    concat_dim=-1,
                    name='concat_vw_vwc'
                )
            elif config.model == "vcvkg":
                net_in = ConcatLayer(
                    [net_class_label_embed, net_kg],
                    concat_dim=-1,
                    name='concat_vc_vwc'
                )
            elif config.model == "kgonly":
                net_in = ConcatLayer(
                    [net_kg],
                    concat_dim=-1,
                    name='concat_vwc'
                )
            else:
                raise Exception("config.model value error")

            filter_length = [2, 4, 8]
            # dbpedia
            n_filter = 600
            # n_filter = 200

            net_cnn_list = list()

            for fsz in filter_length:

                net_cnn = Conv1d(
                    net_in,
                    n_filter=n_filter,
                    filter_size=fsz,
                    stride=1,
                    act=tf.nn.relu,
                    name="cnn%d" % fsz
                )
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs, axis=1, name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            '''
            if config.model == "vwvcvkg":
                net_class_label_embed.outputs = tf.slice(
                    net_class_label_embed.outputs,
                    [0, 0, 0],
                    [config.batch_size, 1, self.word_embedding_dim],
                    name="slice_word"
                )
                net_class_label_embed.outputs = tf.squeeze(
                    net_class_label_embed.outputs,
                    name="squeeze_word"
                )
                net_cnn = ConcatLayer(net_cnn_list + [net_class_label_embed], concat_dim=-1)
            else:
                net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)
            '''
            net_cnn = ConcatLayer(net_cnn_list, concat_dim=-1)

            net_fc = DropoutLayer(net_cnn, keep=0.5, is_fix=True, is_train=is_train, name='drop1')

            net_fc = DenseLayer(
                net_fc,
                n_units=400,
                act=tf.nn.relu,
                name="fc_1"
            )

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop2')

            # dbpedia
            net_fc = DenseLayer(
                net_fc,
                n_units=100,
                act=tf.nn.relu,
                name="fc_2"
            )
            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop3')

            net_fc = DenseLayer(
                net_fc,
                n_units=1,
                act=tf.nn.sigmoid,
                name="fc_3"
            )
        return net_fc, net_cnn
    def __get_network_rnnfc__(self, model_name, encode_seqs, class_label_seqs, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_word_embed = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed"
            )

            net_class_label_embed = InputLayer(
                inputs=class_label_seqs,
                name="in_class_label_embed"
            )

            net_class_label_embed.outputs = tf.slice(
                net_class_label_embed.outputs,
                [0, 0, 0],
                [config.batch_size, 1, self.word_embedding_dim],
                name="slice_word"
            )

            net_class_label_embed.outputs = tf.squeeze(
                net_class_label_embed.outputs,
                name="squeeze_word"
            )

            net_in = ConcatLayer(
                [net_word_embed],
                concat_dim=-1,
                name='concat_vw'
            )

            net_rnn = RNNLayer(
                net_in,
                cell_fn=tf.contrib.rnn.BasicLSTMCell,
                n_hidden=512,
                n_steps=self.max_length,
                return_last=True,
                name='lstm'
            )

            net_fc = ConcatLayer([net_rnn, net_class_label_embed], concat_dim=-1)

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop1')

            net_fc = DenseLayer(
                net_fc,
                n_units=400,
                act=tf.nn.relu,
                name="fc_1"
            )

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop2')

            # dbpedia
            net_fc = DenseLayer(
                net_fc,
                n_units=100,
                act=tf.nn.relu,
                name="fc_2"
            )
            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop3')

            net_fc = DenseLayer(
                net_fc,
                n_units=1,
                act=tf.nn.sigmoid,
                name="fc_3"
            )
        return net_fc
    def __get_network_cnnfc__(self, model_name, encode_seqs, class_label_seqs, reuse=False, is_train=True):
        with tf.variable_scope(model_name, reuse=reuse):
            tl.layers.set_name_reuse(reuse)

            net_word_embed = InputLayer(
                inputs=encode_seqs,
                name="in_word_embed"
            )

            net_class_label_embed = InputLayer(
                inputs=class_label_seqs,
                name="in_class_label_embed"
            )

            net_class_label_embed.outputs = tf.slice(
                net_class_label_embed.outputs,
                [0, 0, 0],
                [config.batch_size, 1, self.word_embedding_dim],
                name="slice_word"
            )

            net_class_label_embed.outputs = tf.squeeze(
                net_class_label_embed.outputs,
                name="squeeze_word"
            )

            net_in = ConcatLayer(
                [net_word_embed],
                concat_dim=-1,
                name='concat_vw'
            )

            filter_length = [2, 4, 8]
            # dbpedia
            n_filter = 600
            # n_filter = 200

            net_cnn_list = list()

            for fsz in filter_length:

                net_cnn = Conv1d(
                    net_in,
                    n_filter=n_filter,
                    filter_size=fsz,
                    stride=1,
                    act=tf.nn.relu,
                    name="cnn%d" % fsz
                )
                net_cnn.outputs = tf.reduce_max(net_cnn.outputs, axis=1, name="global_maxpool%d" % fsz)
                net_cnn_list.append(net_cnn)

            net_cnn = ConcatLayer(net_cnn_list + [net_class_label_embed], concat_dim=-1)

            net_fc = DropoutLayer(net_cnn, keep=0.5, is_fix=True, is_train=is_train, name='drop1')

            net_fc = DenseLayer(
                net_fc,
                n_units=400,
                act=tf.nn.relu,
                name="fc_1"
            )

            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop2')

            # dbpedia
            net_fc = DenseLayer(
                net_fc,
                n_units=100,
                act=tf.nn.relu,
                name="fc_2"
            )
            net_fc = DropoutLayer(net_fc, keep=0.5, is_fix=True, is_train=is_train, name='drop3')

            net_fc = DenseLayer(
                net_fc,
                n_units=1,
                act=tf.nn.sigmoid,
                name="fc_3"
            )
        return net_fc
def squeezenet(x, is_train=True, reuse=False):
    # model from: https://github.com/wohlert/keras-squeezenet
    #             https://github.com/DT42/squeezenet_demo/blob/master/model.py
    with tf.variable_scope("squeezenet", reuse=reuse):
        with tf.variable_scope("input"):
            n = InputLayer(x)
            # n = Conv2d(n, 96, (7,7),(2,2),tf.nn.relu,'SAME',name='conv1')
            n = Conv2d(n, 64, (3, 3), (2, 2), tf.nn.relu, 'SAME', name='conv1')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire2"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire3"):
            n = Conv2d(n,
                       16, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        64, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        64, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire4"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire5"):
            n = Conv2d(n,
                       32, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        128, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        128, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')
            n = MaxPool2d(n, (3, 3), (2, 2), 'VALID', name='max')

        with tf.variable_scope("fire6"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire7"):
            n = Conv2d(n,
                       48, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        192, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        192, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire8"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("fire9"):
            n = Conv2d(n,
                       64, (1, 1), (1, 1),
                       tf.nn.relu,
                       'SAME',
                       name='squeeze1x1')
            n1 = Conv2d(n,
                        256, (1, 1), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand1x1')
            n2 = Conv2d(n,
                        256, (3, 3), (1, 1),
                        tf.nn.relu,
                        'SAME',
                        name='expand3x3')
            n = ConcatLayer([n1, n2], -1, name='concat')

        with tf.variable_scope("output"):
            n = DropoutLayer(n,
                             keep=0.5,
                             is_fix=True,
                             is_train=is_train,
                             name='drop1')
            n = Conv2d(n, 1000, (1, 1), (1, 1), padding='VALID',
                       name='conv10')  # 13, 13, 1000
            n = GlobalMeanPool2d(n)
        return n
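For inference with saved parameters, a sketch along these lines works in TensorLayer 1.x; the .npz filename is an assumption and must come from an earlier, architecture-matching tl.files.save_npz call.

x = tf.placeholder(tf.float32, [None, 224, 224, 3])
n = squeezenet(x, is_train=False, reuse=False)
sess = tf.InteractiveSession()
tl.layers.initialize_global_variables(sess)
tl.files.load_and_assign_npz(sess=sess, name='squeezenet.npz', network=n)  # hypothetical file
probs = tf.nn.softmax(n.outputs)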
Example #10
File: ATDA.py Project: xmhGit/ATDA
    def create_model(self):

        if self.name == 'mnist-mnistm':
            drop_prob = {'Ft': 0.2, 'F1': 0.5, 'F2': 0.5}
            self.x = tf.placeholder(tf.float32, shape=[None, 28, 28, 3])
            self.y_ = tf.placeholder(tf.float32, shape=[None, 10])
            self.istrain = tf.placeholder(tf.bool, shape=[])

            _input = InputLayer(self.x, name='input_layer')
            _shared_net = _input
            _shared_net = Conv2d(_shared_net,
                                 n_filter=32,
                                 filter_size=(5, 5),
                                 strides=(1, 1),
                                 act=tf.nn.relu,
                                 padding='SAME',
                                 name='cnn1')
            _shared_net = MaxPool2d(_shared_net,
                                    filter_size=(2, 2),
                                    strides=(2, 2),
                                    padding='SAME',
                                    name='pool_layer1')

            _shared_net = Conv2d(_shared_net,
                                 n_filter=48,
                                 filter_size=(5, 5),
                                 strides=(1, 1),
                                 act=tf.identity,
                                 padding='SAME',
                                 name='cnn2')
            _shared_net = BatchNormLayer(_shared_net,
                                         is_train=True,
                                         act=tf.nn.relu)
            _shared_net = MaxPool2d(_shared_net,
                                    filter_size=(2, 2),
                                    strides=(2, 2),
                                    padding='SAME',
                                    name='pool_layer2')
            _shared_net = FlattenLayer(_shared_net)

            feature = _shared_net.outputs

            _F1_net = _shared_net
            _F2_net = _shared_net
            _Ft_net = _shared_net

            with tf.variable_scope("F1") as scope:
                _F1_net = DropoutLayer(_F1_net,
                                       keep=drop_prob['F1'],
                                       name='drop1',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F1_net = DenseLayer(_F1_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu1')
                _F1_net = BatchNormLayer(_F1_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn1')
                _F1_net = DropoutLayer(_F1_net,
                                       keep=drop_prob['F1'],
                                       name='drop2',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F1_net = DenseLayer(_F1_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu2')
                _F1_net = BatchNormLayer(_F1_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn2')
                _F1_net = DenseLayer(_F1_net,
                                     n_units=10,
                                     act=tf.nn.softmax,
                                     name='output')
                self.F1_out = _F1_net.outputs

            with tf.variable_scope("F2") as scope:

                _F2_net = DropoutLayer(_F2_net,
                                       keep=drop_prob['F2'],
                                       name='drop1',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F2_net = DenseLayer(_F2_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu1')
                _F2_net = BatchNormLayer(_F2_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn1')
                _F2_net = DropoutLayer(_F2_net,
                                       keep=drop_prob['F2'],
                                       name='drop2',
                                       is_fix=True,
                                       is_train=self.istrain)
                _F2_net = DenseLayer(_F2_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu2')
                _F2_net = BatchNormLayer(_F2_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn2')
                _F2_net = DenseLayer(_F2_net,
                                     n_units=10,
                                     act=tf.nn.softmax,
                                     name='output')
                self.F2_out = _F2_net.outputs

            with tf.variable_scope("Ft") as scope:
                _Ft_net = DropoutLayer(_Ft_net,
                                       keep=drop_prob['Ft'],
                                       name='drop1',
                                       is_fix=True,
                                       is_train=self.istrain)
                _Ft_net = DenseLayer(_Ft_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu1')
                _Ft_net = BatchNormLayer(_Ft_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn1')
                _Ft_net = DropoutLayer(_Ft_net,
                                       keep=drop_prob['Ft'],
                                       name='drop2',
                                       is_fix=True,
                                       is_train=self.istrain)
                _Ft_net = DenseLayer(_Ft_net,
                                     n_units=100,
                                     act=tf.identity,
                                     name='relu2')
                _Ft_net = BatchNormLayer(_Ft_net,
                                         is_train=True,
                                         act=tf.nn.relu,
                                         name='bn2')
                _Ft_net = DenseLayer(_Ft_net,
                                     n_units=10,
                                     act=tf.nn.softmax,
                                     name='output')
                self.Ft_out = _Ft_net.outputs

            #self.cost = cross_entropy(F1_out,self.y_,name='F1_loss')#+cross_entropy(F2_out,self.y_,name='F2_loss')+cross_entropy(Ft_out,self.y_,name='Ft_loss')
            # NOTE: hand-rolled cross-entropy; tf.log(0) can produce NaNs.
            # Clipping the softmax outputs (tf.clip_by_value) or using
            # tf.nn.softmax_cross_entropy_with_logits would be safer.
            self.F1_loss = -tf.reduce_mean(self.y_ * tf.log(self.F1_out))
            self.F2_loss = -tf.reduce_mean(self.y_ * tf.log(self.F2_out))
            self.Ft_loss = -tf.reduce_mean(self.y_ * tf.log(self.Ft_out))
            self.F1_acc = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.F1_out, 1), tf.argmax(self.y_, 1)),
                    tf.float32))
            self.F2_acc = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.F2_out, 1), tf.argmax(self.y_, 1)),
                    tf.float32))
            self.Ft_acc = tf.reduce_mean(
                tf.cast(
                    tf.equal(tf.argmax(self.Ft_out, 1), tf.argmax(self.y_, 1)),
                    tf.float32))

            # Weight-similarity penalty (ATDA): the elementwise product of
            # F1's and F2's dense weights pushes the two labelers toward
            # different views of the shared features; all_params[14] is
            # assumed to index a 100x100 DenseLayer weight in each branch.
            self.cost = self.F1_loss + self.F2_loss + self.Ft_loss + tf.reduce_sum(
                tf.abs(
                    tf.multiply(tf.transpose(_F1_net.all_params[14]),
                                _F2_net.all_params[14])))
            self.labeling_cost = self.F1_loss + self.F2_loss + tf.reduce_sum(
                tf.abs(
                    tf.multiply(tf.transpose(_F1_net.all_params[14]),
                                _F2_net.all_params[14])))
            self.targetspecific_cost = self.Ft_loss

            self.F1F2Ft_op = tf.train.AdamOptimizer(
                learning_rate=0.01).minimize(self.cost)
            self.F1F2_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(
                self.labeling_cost)
            self.Ft_op = tf.train.AdamOptimizer(learning_rate=0.01).minimize(
                self.targetspecific_cost)

            tl.layers.initialize_global_variables(self.sess)
            print('*' * 120)
            _shared_net.print_params()
            _shared_net.print_layers()
            print('*' * 120)
            _F1_net.print_params()
            _F1_net.print_layers()
            print('*' * 120)
            _F2_net.print_params()
            _F2_net.print_layers()
            print('*' * 120)
            _Ft_net.print_params()
            _Ft_net.print_layers()
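A hedged sketch of how these ops are typically stepped once the model is built; xs and ys stand for a minibatch of images and one-hot labels and are assumptions, not names from the code above.

# Inside the class, after create_model():
_, l1, l2 = self.sess.run(
    [self.F1F2_op, self.F1_loss, self.F2_loss],
    feed_dict={self.x: xs, self.y_: ys, self.istrain: True})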