Example #1
def eq_cifar_fn(x, output_dim=10, trainable=True):
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(h_input='Z2',
                                                            h_output='C4',
                                                            in_channels=3,
                                                            out_channels=8,
                                                            ksize=3)
    w = tf.get_variable('w1', shape=w_shape)

    conv1 = gconv2d(input=x,
                    filter=w,
                    strides=[1, 2, 2, 1],
                    padding='SAME',
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info)
    tf.add_to_collection('conv_output1', conv1)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(h_input='C4',
                                                            h_output='C4',
                                                            in_channels=8,
                                                            out_channels=32,
                                                            ksize=5)
    w = tf.get_variable('w2', shape=w_shape)
    conv2 = gconv2d(input=pool1,
                    filter=w,
                    strides=[1, 2, 2, 1],
                    padding='SAME',
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
    out_channels = 2
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(h_input='C4',
                                                            h_output='C4',
                                                            in_channels=32,
                                                            out_channels=out_channels,
                                                            ksize=5)
    w = tf.get_variable('w3', shape=w_shape)
    conv3 = gconv2d(input=pool2,
                    filter=w,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info)
    # A C4 feature map stores out_channels * 4 flat channels; average over the
    # four rotation copies (coset pooling) to get a rotation-invariant map.
    conv3 = tf.reshape(conv3,
                       [-1] + conv3.get_shape().as_list()[1:3] +
                       [4, out_channels])
    conv3 = tf.reduce_mean(conv3, axis=3)
    pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
    pool3_flat = tf.layers.flatten(pool3)
    u = tf.layers.dense(inputs=pool3_flat,
                        units=output_dim,
                        activation=tf.nn.relu,
                        trainable=trainable)
    tf.add_to_collection('conv_output2', conv2)
    return u
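For context, here is a minimal usage sketch for a function like eq_cifar_fn, assuming TF 1.x and GrouPy's TensorFlow backend (the import path and placeholder are assumptions, not part of the original snippet):

import tensorflow as tf
from groupy.gconv.tensorflow_gconv.splitgconv2d import gconv2d, gconv2d_util

# Build the graph on CIFAR-sized input and check the logits shape.
x = tf.placeholder(tf.float32, [None, 32, 32, 3])
logits = eq_cifar_fn(x, output_dim=10)
print(logits.shape)  # expected: (?, 10)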
Example #2
def eq_cnn_fn(x, output_dim=10, trainable=True, group='C4', num_filters=2):
    nchannels = int(x.shape[3])
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
        h_input='Z2',
        h_output='C4',
        in_channels=nchannels,
        out_channels=2,
        ksize=5)
    w = tf.get_variable('w1', shape=w_shape)

    conv1 = gconv2d(input=x,
                    filter=w,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info)
    tf.add_to_collection('conv_output1', conv1)
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)

    # pool1 = layers.Dropout(0.25)(pool1)
    out_channels = 2
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
        h_input='C4',
        h_output='C4',
        in_channels=2,
        out_channels=out_channels,
        ksize=5)
    w = tf.get_variable('w2', shape=w_shape)
    conv2 = gconv2d(input=pool1,
                    filter=w,
                    strides=[1, 1, 1, 1],
                    padding='SAME',
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info)
    # Coset pooling: split the flat channels into (4 rotations, out_channels)
    # and average over the rotation axis; the shape is then already
    # [N, H, W, out_channels], so no second reshape is needed.
    conv2 = tf.reshape(conv2,
                       [-1] + conv2.get_shape().as_list()[1:3] +
                       [4, out_channels])
    conv2 = tf.reduce_mean(conv2, axis=3)
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)

    pool2_flat = tf.layers.flatten(pool2)
    u = tf.layers.dense(inputs=pool2_flat,
                        units=output_dim,
                        activation=tf.nn.relu,
                        trainable=trainable)
    tf.add_to_collection('conv_output2', conv2)
    return u
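The reshape/reduce_mean pair above implements coset pooling: a C4 feature map with out_channels channels is stored as out_channels * 4 flat channels (one copy per 90-degree rotation), and averaging over the rotation axis yields a rotation-invariant map. A hypothetical shape walkthrough for a 28x28 input with out_channels=2:

# conv2 (flat C4 layout):   (N, 14, 14, 8)   = 4 rotations x 2 channels
# after tf.reshape:         (N, 14, 14, 4, 2)
# after reduce_mean(axis=3): (N, 14, 14, 2)  invariant to 90-degree rotations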
Example #3
    def build(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})

        self.gconv_indices, self.gconv_shape_info, w_shape = gconv2d_util(
            h_input=self.h,
            h_output=self.h,
            in_channels=input_shape[-1],
            out_channels=input_shape[-1],
            ksize=1,
        )
        if self.h == 'C4':
            dim //= 4
        elif self.h == 'D4':
            dim //= 8
        shape = (dim, )

        self.beta = self.add_weight(shape=shape,
                                    name='beta',
                                    initializer=self.beta_initializer)

        self.broadcast_shape = [1] * len(input_shape)
        self.broadcast_shape[self.axis] = input_shape[self.axis]

        self.built = True
Example #4
def make_graph(h_input, h_output):
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
        h_input=h_input, h_output=h_output, in_channels=1, out_channels=1, ksize=3)
    nti = gconv_shape_info[-2]
    x = tf.placeholder(tf.float32, [None, 5, 5, 1 * nti])
    w = tf.Variable(tf.truncated_normal(w_shape, stddev=1.))
    y = gconv2d(input=x, filter=w, strides=[1, 1, 1, 1], padding='SAME',
                gconv_indices=gconv_indices, gconv_shape_info=gconv_shape_info)
    return x, y
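A quick way to exercise make_graph is to run it on random data and confirm the group structure of the output; for Z2 -> C4, one input channel becomes four output channels (one per rotation). This driver is a sketch under TF 1.x assumptions:

import numpy as np
import tensorflow as tf

x, y = make_graph(h_input='Z2', h_output='C4')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.randn(2, 5, 5, 1).astype(np.float32)
    out = sess.run(y, feed_dict={x: batch})
    print(out.shape)  # expected: (2, 5, 5, 4)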
Example #5
    def build(self, input_shape):
        self.gconv_indices, self.gconv_shape_info, self.w_shape = gconv2d_util(
            h_input=self.input_group,
            h_output=self.output_group,
            in_channels=self.input_channels,
            out_channels=self.output_channels,
            ksize=self.ksize)

        self.w = self.add_weight(shape=self.w_shape,
                                 initializer='random_normal',
                                 trainable=True)
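Example #5 only defines build; a matching call for such a layer might look like the following sketch (hypothetical, assuming the standard Keras custom-layer pattern and the hyperparameters set in __init__):

    def call(self, inputs):
        # Apply the group convolution using the weight created in build().
        return gconv2d(input=inputs,
                       filter=self.w,
                       strides=[1, 1, 1, 1],
                       padding='SAME',
                       gconv_indices=self.gconv_indices,
                       gconv_shape_info=self.gconv_shape_info)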
Example #6
    def build(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})

        self.gconv_indices, self.gconv_shape_info, w_shape = gconv2d_util(
            h_input=self.h,
            h_output=self.h,
            in_channels=input_shape[-1],
            out_channels=input_shape[-1],
            ksize=1)
        if self.h == 'C4':
            dim //= 4
        elif self.h == 'D4':
            dim //= 8
        shape = (dim, )

        self.gamma = self.add_weight(shape=shape,
                                     name='gamma',
                                     initializer=self.gamma_initializer,
                                     regularizer=self.gamma_regularizer,
                                     constraint=self.gamma_constraint,
                                     trainable=self.scale)

        self.beta = self.add_weight(shape=shape,
                                    name='beta',
                                    initializer=self.beta_initializer,
                                    regularizer=self.beta_regularizer,
                                    constraint=self.beta_constraint,
                                    trainable=self.center)

        self.moving_mean = self.add_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer,
            trainable=False)

        self.moving_variance = self.add_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer,
            trainable=False)

        self.built = True
Example #7
def make_graph(h_input, h_output):
    gconv_indices, gconv_shape_info, w_shape = gconv2d_util(h_input=h_input,
                                                            h_output=h_output,
                                                            in_channels=1,
                                                            out_channels=1,
                                                            ksize=3)
    nti = gconv_shape_info[-2]
    x = tf.placeholder(tf.float32, [None, 5, 5, 1 * nti])
    w = tf.Variable(tf.truncated_normal(w_shape, stddev=1.))
    y = gconv2d(input=x,
                filter=w,
                strides=[1, 1, 1, 1],
                padding='SAME',
                gconv_indices=gconv_indices,
                gconv_shape_info=gconv_shape_info)
    return x, y
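A common sanity check on a graph like this is rotation equivariance: rotating the input of a Z2 -> C4 layer should rotate the output spatially and cyclically shift its four rotation channels. A hedged sketch (the shift direction depends on GrouPy's channel ordering, so treat it as illustrative):

import numpy as np
import tensorflow as tf

x, y = make_graph(h_input='Z2', h_output='C4')
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    im = np.random.randn(1, 5, 5, 1).astype(np.float32)
    out = sess.run(y, feed_dict={x: im})
    out_rot = sess.run(y, feed_dict={x: np.rot90(im, axes=(1, 2))})
    # Rotate the original output and shift its rotation channels, then compare.
    shifted = np.roll(np.rot90(out, axes=(1, 2)), shift=1, axis=-1)
    print(np.allclose(out_rot, shifted, atol=1e-4))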
Example #8
def gconv_bn_act(x,
                 gconv_type,
                 C_in,
                 C_out,
                 ksize=3,
                 padding='VALID',
                 bn=True,
                 is_train=True,
                 act=tf.nn.relu,
                 name='gconv_bn_act',
                 reuse=False):
    if gconv_type == 'Z2_to_P4':
        h_in = 'Z2'
        h_out = 'C4'
    elif gconv_type == 'Z2_to_P4M':
        h_in = 'Z2'
        h_out = 'D4'
    elif gconv_type == 'P4_to_P4':
        h_in = 'C4'
        h_out = 'C4'
    elif gconv_type == 'P4M_to_P4M':
        h_in = 'D4'
        h_out = 'D4'
    else:
        raise NotImplementedError('Unsupported gconv_type: {}!'.format(gconv_type))
    with tf.variable_scope(name, reuse=reuse):
        gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
            h_input=h_in,
            h_output=h_out,
            in_channels=C_in,
            out_channels=C_out,
            ksize=ksize)
        # w = tf.Variable(tf.truncated_normal(w_shape, stddev=1.), name='kernel')
        w = tf.get_variable("kernel",
                            shape=w_shape,
                            initializer=tf.contrib.layers.xavier_initializer()
                            )  # initialization is very important!!!
        x = gconv2d(input=x,
                    filter=w,
                    strides=[1, 1, 1, 1],
                    padding=padding,
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info,
                    use_cudnn_on_gpu=True)
        if bn:
            x = tf.layers.batch_normalization(x, training=is_train)
        return act(x)
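A short usage sketch for gconv_bn_act, stacking a lifting layer (Z2_to_P4) and a P4_to_P4 layer; the placeholders and names here are assumptions:

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
is_train = tf.placeholder(tf.bool)
h = gconv_bn_act(x, 'Z2_to_P4', C_in=3, C_out=16,
                 is_train=is_train, name='gconv1')
# After the lifting layer the tensor carries C_out * 4 flat channels.
h = gconv_bn_act(h, 'P4_to_P4', C_in=16, C_out=16,
                 is_train=is_train, name='gconv2')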
Example #9
    def _gconv(self, name, x, filter_size, in_filters, out_filters, strides):
        """G-Convolution (D4 -> D4)."""
        gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
            h_input='D4',
            h_output='D4',
            in_channels=in_filters,
            out_channels=out_filters,
            ksize=filter_size)
        w = tf.Variable(tf.truncated_normal(w_shape, stddev=1.))
        x = gconv2d(input=x,
                    filter=w,
                    strides=strides,
                    padding='SAME',
                    gconv_indices=gconv_indices,
                    gconv_shape_info=gconv_shape_info,
                    name=name)
        return x
Example #10
    def g_conv_no_decorator(self,
                            input,
                            input_type,
                            output_type,
                            kernel_size,
                            c_o,
                            s_h,
                            s_w,
                            name,
                            padding=DEFAULT_PADDING,
                            group=1):
        # Verify that the padding is acceptable
        self.validate_padding(padding)

        c_i = np.int32(input.get_shape()[-1])

        # Recover the number of group-feature channels from the flat channel
        # count: C4 stores 4 copies per channel, D4 stores 8, Z2 stores 1.
        if input_type == 'C4':
            c_i = c_i // 4
        elif input_type == 'D4':
            c_i = c_i // 8

        # output = GConv2D(c_o, (kernel_size, kernel_size), kernel_initializer='he_normal', padding=padding, strides=(s_h, s_w), use_bias=False, h_input=input_type, h_output=output_type, name=name)(input)

        with tf.variable_scope(name) as scope:

            gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
                h_input=input_type,
                h_output=output_type,
                in_channels=c_i,
                out_channels=c_o,
                ksize=kernel_size)
            # w = tf.Variable(tf.truncated_normal(w_shape, stddev=1.))
            w = self.make_var(name='weight', shape=w_shape)
            output = gconv2d(input=input,
                             filter=w,
                             strides=[1, s_h, s_w, 1],
                             padding=padding,
                             gconv_indices=gconv_indices,
                             gconv_shape_info=gconv_shape_info)

            return output
Example #11
    def _build_model(self,
                     config,
                     filters,
                     num_ids,
                     differentiable=False,
                     adversarial_ce=False,
                     nat_ce=False,
                     pad_mode='CONSTANT',
                     pad_size=32):
        """Build the core model within the graph."""
        with tf.variable_scope('input'):

            self.group = tf.placeholder(tf.int32, [None], name="group")
            self.num_ids = num_ids

            self.x_input = tf.placeholder(tf.float32, shape=[None, 32, 32, 3])
            self.y_input = tf.placeholder(tf.int64, shape=None)

            self.transform = tf.placeholder(tf.float32, shape=[None, 3])
            trans_x, trans_y, rot = tf.unstack(self.transform, axis=1)
            rot *= np.pi / 180  # convert degrees to radians

            self.is_training = tf.placeholder(tf.bool)

            x = self.x_input
            x = tf.pad(x, [[0, 0], [16, 16], [16, 16], [0, 0]], pad_mode)

            if not differentiable:
                # For spatial non-PGD attacks: rotate and translate image
                ones = tf.ones(shape=tf.shape(trans_x))
                zeros = tf.zeros(shape=tf.shape(trans_x))
                trans = tf.stack([
                    ones, zeros, -trans_x, zeros, ones, -trans_y, zeros, zeros
                ],
                                 axis=1)
                x = tf.contrib.image.rotate(x, rot, interpolation='BILINEAR')
                x = tf.contrib.image.transform(x,
                                               trans,
                                               interpolation='BILINEAR')
            else:
                # for spatial PGD attacks we need a differentiable transformer
                theta = tf.stack([
                    tf.cos(rot), -tf.sin(rot), trans_x / 64,
                    tf.sin(rot),
                    tf.cos(rot), trans_y / 64
                ],
                                 axis=1)
                x = transformer(x, theta, (64, 64))
            x = tf.image.resize_image_with_crop_or_pad(x, pad_size, pad_size)

            # everything below this point is generic (independent of spatial attacks)
            self.x_image = x
            x = tf.map_fn(tf.image.per_image_standardization, x)

            gconv_indices, gconv_shape_info, w_shape = gconv2d_util(
                h_input='Z2',
                h_output='D4',
                in_channels=3,
                out_channels=filters[0],
                ksize=3)

            w = tf.Variable(tf.truncated_normal(w_shape, stddev=1.))
            x = gconv2d(input=x,
                        filter=w,
                        strides=[1, 1, 1, 1],
                        padding='SAME',
                        gconv_indices=gconv_indices,
                        gconv_shape_info=gconv_shape_info)

            # x = self._conv('init_conv', x, 3, 3, 16, self._stride_arr(1))

        # "filters": [16, 16, 32, 64],

        strides = [1, 2, 2]
        activate_before_residual = [True, False, False]
        res_func = self._residual

        with tf.variable_scope('unit_1_0'):
            x = res_func(x, filters[0], filters[1],
                         self._stride_arr(strides[0]),
                         activate_before_residual[0])
        for i in range(1, config.resnet_depth_n):
            with tf.variable_scope('unit_1_%d' % i):
                x = res_func(x, filters[1], filters[1], self._stride_arr(1),
                             False)

        with tf.variable_scope('unit_2_0'):
            x = res_func(x, filters[1], filters[2],
                         self._stride_arr(strides[1]),
                         activate_before_residual[1])
        for i in range(1, config.resnet_depth_n):
            with tf.variable_scope('unit_2_%d' % i):
                x = res_func(x, filters[2], filters[2], self._stride_arr(1),
                             False)

        with tf.variable_scope('unit_3_0'):
            x = res_func(x, filters[2], filters[3],
                         self._stride_arr(strides[2]),
                         activate_before_residual[2])
        for i in range(1, config.resnet_depth_n):
            with tf.variable_scope('unit_3_%d' % i):
                x = res_func(x, filters[3], filters[3], self._stride_arr(1),
                             False)

        with tf.variable_scope('unit_last'):
            x = self._batch_norm('final_bn', x)
            x = self._relu(x, 0.1)
            x = self._global_avg_pool(x)

        # uncomment to add an extra fc layer
        #with tf.variable_scope('unit_fc'):
        #  self.pre_softmax = self._fully_connected(x, 1024)
        #  x = self._relu(x, 0.1)

        with tf.variable_scope('logit'):
            self.pre_softmax = self._fully_connected(x, config.n_classes)

        self.predictions = tf.argmax(self.pre_softmax, 1)
        self.correct_prediction = tf.equal(self.predictions, self.y_input)
        self.num_correct = tf.reduce_sum(
            tf.cast(self.correct_prediction, tf.int64))
        self.accuracy = tf.reduce_mean(
            tf.cast(self.correct_prediction, tf.float32))

        with tf.variable_scope('costs'):
            if adversarial_ce:
                indices_adv = tf.cast(
                    tf.range(self.num_ids,
                             tf.shape(self.pre_softmax)[0]), tf.int32)
                adversarial_ex_presoft = tf.gather(self.pre_softmax,
                                                   indices_adv)
                adversarial_ex_y = tf.gather(self.y_input, indices_adv)
                self.y_xent_for_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=adversarial_ex_presoft, labels=adversarial_ex_y)
                self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.pre_softmax, labels=self.y_input)
            elif nat_ce:
                print("****************** pure_nat_loss\n")
                indices_nat = tf.cast(
                    tf.range(tf.shape(self.pre_softmax)[0] - self.num_ids),
                    tf.int32)
                nat_ex_presoft = tf.gather(self.pre_softmax, indices_nat)
                nat_ex_y = tf.gather(self.y_input, indices_nat)
                self.y_xent_for_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=nat_ex_presoft, labels=nat_ex_y)
                self.y_xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.pre_softmax, labels=self.y_input)
            else:
                self.y_xent_for_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=self.pre_softmax, labels=self.y_input)
                self.y_xent = self.y_xent_for_loss

            self.xent = tf.reduce_sum(self.y_xent, name='y_xent')
            self.mean_xent = tf.reduce_mean(self.y_xent_for_loss)
            self.weight_decay_loss = self._decay()
            #TRADES penalty
            # self.core_loss = self._CoRe()
            self.core_loss2 = self._CoRe_2tensors()
Example #12
    def build(self, input_shape):
        dim = input_shape[self.axis]
        if dim is None:
            raise ValueError('Axis ' + str(self.axis) + ' of '
                             'input tensor should have a defined dimension '
                             'but the layer received an input with shape ' +
                             str(input_shape) + '.')
        self.input_spec = InputSpec(ndim=len(input_shape),
                                    axes={self.axis: dim})

        self.gconv_indices, self.gconv_shape_info, w_shape = gconv2d_util(
            h_input=self.h,
            h_output=self.h,
            in_channels=input_shape[-1],
            out_channels=input_shape[-1],
            ksize=1)
        if self.h == 'C4':
            dim //= 4
        elif self.h == 'D4':
            dim //= 8
        shape = (dim, )

        if self.scale:
            self.gamma = self.add_weight(shape=shape,
                                         name='gamma',
                                         initializer=self.gamma_initializer,
                                         regularizer=self.gamma_regularizer,
                                         constraint=self.gamma_constraint)
        else:
            self.gamma = None
        if self.center:
            self.beta = self.add_weight(shape=shape,
                                        name='beta',
                                        initializer=self.beta_initializer,
                                        regularizer=self.beta_regularizer,
                                        constraint=self.beta_constraint)
        else:
            self.beta = None
        self.moving_mean = self.add_weight(
            shape=shape,
            name='moving_mean',
            initializer=self.moving_mean_initializer,
            trainable=False)
        self.moving_variance = self.add_weight(
            shape=shape,
            name='moving_variance',
            initializer=self.moving_variance_initializer,
            trainable=False)

        def repeat(w):
            n = 1
            if self.h == 'C4':
                n *= 4
            elif self.h == 'D4':
                n *= 8
            elif self.h == 'Z2':
                n *= 1
            else:
                raise ValueError('Wrong h: %s' % self.h)

            return K.reshape(K.tile(K.expand_dims(w, -1), [1, n]), [-1])

        # Guard against disabled scale/center: gamma/beta may be None.
        self.repeated_gamma = (repeat(self.gamma)
                               if self.gamma is not None else None)
        self.repeated_beta = (repeat(self.beta)
                              if self.beta is not None else None)

        self.repeated_moving_mean = repeat(self.moving_mean)
        self.repeated_moving_variance = repeat(self.moving_variance)
        self.built = True
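The repeated_* tensors tile each per-group parameter across the group's transformations (4 for C4, 8 for D4) so they broadcast against the flat channel axis. A sketch of how call might consume them, assuming the usual Keras BatchNormalization pattern and an epsilon attribute set in __init__:

    def call(self, inputs, training=None):
        # Inference-path normalization with group-tied statistics.
        return K.batch_normalization(inputs,
                                     self.repeated_moving_mean,
                                     self.repeated_moving_variance,
                                     self.repeated_beta,
                                     self.repeated_gamma,
                                     epsilon=self.epsilon)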