Example No. 1
 def __init__(self):
     super(SimplerCNN, self).__init__()
     self.dropout2d_input = nn.Dropout2d(rate=0.3)
     self.conv1 = nn.Conv2d(in_channels=3,
                            out_channels=15,
                            kernel_size=3,
                            stride=3,
                            padding=2)
     self.relu1 = nn.LeakyRelu()
     self.conv2 = nn.Conv2d(in_channels=15,
                            out_channels=30,
                            kernel_size=3,
                            stride=3,
                            padding=3)
     self.relu2 = nn.LeakyRelu()
     self.dropout2d_conv1 = nn.Dropout2d(rate=0.5)
     self.conv3 = nn.Conv2d(in_channels=30, out_channels=40, kernel_size=4)
     self.relu3 = nn.LeakyRelu()
     self.flatten = nn.Flatten()
     self.dropout2d_conv2 = nn.Dropout2d(rate=0.2)
     self.linear = nn.Linear(in_dimension=360, out_dimension=180)
     self.relu4 = nn.LeakyRelu()
     self.bn1 = nn.BatchNorm()
     self.dropout3 = nn.Dropout(rate=0.3)
     self.linear2 = nn.Linear(in_dimension=180, out_dimension=10)
     self.bn2 = nn.BatchNorm()
     self.softmax = nn.Softmax()
     self.set_forward()
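
The first linear layer's in_dimension=360 is consistent with 32×32 RGB inputs (CIFAR-sized images): with the strides and paddings above, the three convolutions bring the spatial size down to 3×3 over 40 channels, and 40·3·3 = 360. A minimal sketch of that arithmetic, assuming a 32×32 input and the usual floor-based output-size formula:

    # Hypothetical shape check for SimplerCNN, assuming 32x32 RGB inputs.
    def conv_out(size, kernel, stride, padding):
        # standard convolution output-size formula
        return (size + 2 * padding - kernel) // stride + 1

    s = 32                      # assumed input height/width
    s = conv_out(s, 3, 3, 2)    # conv1 -> 12
    s = conv_out(s, 3, 3, 3)    # conv2 -> 6
    s = conv_out(s, 4, 1, 0)    # conv3 -> 3
    print(40 * s * s)           # 40 channels * 3 * 3 = 360

The same check applies to SimpleCNN in Example No. 5 below: two convolutions take 32×32 to 6×6 over 10 channels, again giving 360.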
Example No. 2
        def __init__(self, image_size=64, width=64, zdim=128):
            super(SketchVAE.ImageEncoder, self).__init__()
            self.zdim = zdim

            self.net = th.nn.Sequential(
                th.nn.Conv2d(4, width, 5, padding=2),
                th.nn.InstanceNorm2d(width),
                th.nn.ReLU(inplace=True),
                # 64x64
                th.nn.Conv2d(width, width, 5, padding=2),
                th.nn.InstanceNorm2d(width),
                th.nn.ReLU(inplace=True),
                # 64x64
                th.nn.Conv2d(width, 2 * width, 5, stride=1, padding=2),
                th.nn.InstanceNorm2d(2 * width),
                th.nn.ReLU(inplace=True),
                # 64x64 (stride 1 keeps the spatial size)
                th.nn.Conv2d(2 * width, 2 * width, 5, stride=2, padding=2),
                th.nn.InstanceNorm2d(2 * width),
                th.nn.ReLU(inplace=True),
                # 32x32
                th.nn.Conv2d(2 * width, 2 * width, 5, stride=2, padding=2),
                th.nn.InstanceNorm2d(2 * width),
                th.nn.ReLU(inplace=True),
                # 16x16
                th.nn.Conv2d(2 * width, 2 * width, 5, stride=2, padding=2),
                th.nn.InstanceNorm2d(2 * width),
                th.nn.ReLU(inplace=True),
                # 8x8
                th.nn.Conv2d(2 * width, 2 * width, 5, stride=2, padding=2),
                th.nn.InstanceNorm2d(2 * width),
                th.nn.ReLU(inplace=True),
                # 4x4
                modules.Flatten(),
                th.nn.Linear(4 * 4 * 2 * width, 2 * zdim))
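
The encoder ends in a linear layer of width 2 * zdim; in a standard VAE this vector is split into a mean and a log-variance of size zdim each, which feed the reparameterization step. A minimal sketch under that assumption (reparameterize, mu and logvar are illustrative names, not part of SketchVAE):

    import torch as th

    # Illustrative only: split the 2*zdim encoder output into mean / log-variance
    # and sample z with the reparameterization trick. Shapes assume (N, 2*zdim).
    def reparameterize(encoder_out, zdim=128):
        mu, logvar = encoder_out[:, :zdim], encoder_out[:, zdim:]
        std = th.exp(0.5 * logvar)
        eps = th.randn_like(std)
        return mu + eps * std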
Example No. 3
        def conv_layer(x):
            """
            the derivative check in the gradient checker relates to the input of the function
            hence, the input should be z - since the backward step computes @loss / @z
            """

            conv1 = nn.Conv2d(in_channels=1, out_channels=2, kernel_size=2)
            relu1 = nn.Relu()
            conv2 = nn.Conv2d(in_channels=2, out_channels=4, kernel_size=2)
            relu2 = nn.Relu()
            flatten = nn.Flatten()
            linear = nn.Linear(4, 2)
            softmax = nn.Softmax()

            # forward pass
            a = relu1(conv1(x))
            a = relu2(conv2(a))
            a_flatten = flatten(a)
            dist = softmax(linear(a_flatten))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten.backward(linear_grad)
            relu2_grad = relu2.backward(flatten_grad)
            conv2_grad = conv2.backward(relu2_grad)
            relu1_grad = relu1.backward(conv2_grad)
            conv1_grad = conv1.backward(relu1_grad)

            return loss, conv1_grad
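
As the docstring says, this function is meant to be handed to a gradient checker that perturbs the input x and compares a finite-difference estimate against the analytic ∂loss/∂x returned as conv1_grad. A minimal central-difference sketch, assuming x is a float NumPy array and conv_layer is in scope; numeric_grad_check and eps are illustrative names, not part of the framework:

    import numpy as np

    def numeric_grad_check(f, x, eps=1e-5):
        # Perturb each entry of x, re-evaluate the loss, and compare the
        # central-difference estimate against the analytic gradient from f.
        _, analytic = f(x)
        numeric = np.zeros_like(x, dtype=float)
        it = np.nditer(x, flags=['multi_index'])
        while not it.finished:
            idx = it.multi_index
            orig = x[idx]
            x[idx] = orig + eps
            loss_plus, _ = f(x)
            x[idx] = orig - eps
            loss_minus, _ = f(x)
            x[idx] = orig
            numeric[idx] = np.sum(loss_plus - loss_minus) / (2 * eps)
            it.iternext()
        return np.max(np.abs(numeric - analytic))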
Example No. 4
        def conv(b):
            """
            the derivative check in the gradient checker relates to the input of the function
            hence, the input should be z - since the backward step computes @loss / @z
            """

            # simulate end of classification
            conv = nn.Conv2d(in_channels=1, out_channels=3, kernel_size=2)
            relu = nn.Relu()
            flatten = nn.Flatten()
            linear = nn.Linear(in_dimension=12, out_dimension=4)
            softmax = nn.Softmax()

            conv.set_biases(b.reshape(3, 1))

            # forward
            a = flatten(relu(conv(x)))
            dist = softmax(linear(a))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten.backward(linear_grad)
            relu_grad = relu.backward(flatten_grad)
            conv_grad = conv.backward(relu_grad)

            b_grad = conv.b_grad

            return loss, b_grad
Example No. 5
 def __init__(self):
     super(SimpleCNN, self).__init__()
     self.conv1 = nn.Conv2d(in_channels=3,
                            out_channels=6,
                            kernel_size=3,
                            stride=3,
                            padding=2)
     self.tanh1 = nn.Tanh()
     self.conv2 = nn.Conv2d(in_channels=6,
                            out_channels=10,
                            kernel_size=3,
                            stride=3,
                            padding=3)
     self.tanh2 = nn.Tanh()
     self.dropout2d = nn.Dropout2d(rate=0.5)
     self.flatten = nn.Flatten()
     self.linear = nn.Linear(in_dimension=360, out_dimension=10)
     self.softmax = nn.Softmax()
     self.set_forward()
Example No. 6
        def flatten(x):

            flatten_ = nn.Flatten()
            linear = nn.Linear(in_dimension=48, out_dimension=4)
            softmax = nn.Softmax()

            # forward
            flatten_x = flatten_(x)
            dist = softmax(linear(flatten_x))

            # backward
            labels = np.zeros(dist.shape)
            labels[:, 1] = 1
            loss = -np.log(np.sum(dist * labels, axis=1))

            softmax_grad = softmax.backward(labels)
            linear_grad = linear.backward(softmax_grad)
            flatten_grad = flatten_.backward(linear_grad)
            return loss, flatten_grad
Example No. 7
    def __init__(self, conditional=False, width=64, color_output=False):
        super(Discriminator, self).__init__()

        self.conditional = conditional

        sn = th.nn.utils.spectral_norm

        num_chan_in = 3 if color_output else 1

        self.net = th.nn.Sequential(
            th.nn.Conv2d(num_chan_in, width, 3, padding=1),
            th.nn.LeakyReLU(0.2, inplace=True),
            th.nn.Conv2d(width, 2*width, 4, padding=1, stride=2),
            th.nn.LeakyReLU(0.2, inplace=True),
            # 16x16

            sn(th.nn.Conv2d(2*width, 2*width, 3, padding=1)),
            th.nn.LeakyReLU(0.2, inplace=True),
            sn(th.nn.Conv2d(2*width, 4*width, 4, padding=1, stride=2)),
            th.nn.LeakyReLU(0.2, inplace=True),
            # 8x8

            sn(th.nn.Conv2d(4*width, 4*width, 3, padding=1)),
            th.nn.LeakyReLU(0.2, inplace=True),
            sn(th.nn.Conv2d(4*width, width*4, 4, padding=1, stride=2)),
            th.nn.LeakyReLU(0.2, inplace=True),
            # 4x4

            sn(th.nn.Conv2d(4*width, 4*width, 3, padding=1)),
            th.nn.LeakyReLU(0.2, inplace=True),
            sn(th.nn.Conv2d(4*width, width*4, 4, padding=1, stride=2)),
            th.nn.LeakyReLU(0.2, inplace=True),
            # 2x2

            modules.Flatten(),
            th.nn.Linear(width*4*2*2, 1),
        )
Example No. 8
def roar_kar(keep, random=False, train_only=False):

    logdir = 'tf_logs/standard/'

    def get_savedir():

        savedir = logdir.replace('tf_logs', 'KAR' if keep else 'ROAR')

        if not os.path.exists(savedir):

            os.makedirs(savedir)

        return savedir


#     ratio = 0.1

    percentiles = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
    attribution_methods = ['normal', 'LRP', 'proposed_method']

    if not train_only:
        DNN = model_io.read('../models/MNIST/LeNet-5.nn')
        for v in attribution_methods:
            batch_size = 128
            print("{} Step is start".format(v))
            if random:
                print("{} percentile Remove".format(v))
                occlude_dataset(DNN=DNN,
                                attribution=v,
                                percentiles=percentiles,
                                random=True,
                                keep=keep,
                                batch_size=batch_size,
                                savedir=get_savedir())
            else:
                print("{} Random Remove".format(v))
                occlude_dataset(DNN=DNN,
                                attribution=v,
                                percentiles=percentiles,
                                random=False,
                                keep=keep,
                                batch_size=batch_size,
                                savedir=get_savedir())
            print("{} : occlude step is done".format(v))
        print("ress record")
    ress = {k: [] for k in attribution_methods}

    for _ in range(3):

        for v in attribution_methods:

            res = []

            for p in percentiles:

                occdir = get_savedir() + '{}_{}_{}.pickle'.format('{}', v, p)
                occdir_y = get_savedir() + '{}_{}_{}_{}.pickle'.format(
                    '{}', v, p, 'label')

                data_train = unpickle(occdir.format('train'))
                #                 data_test = unpickle(occdir.format('test'))
                Xtrain = np.array(data_train)
                Ytrain = unpickle(occdir_y.format('train'))
                Ytrain = np.array(Ytrain)
                Xtest = data_io.read('../data/MNIST/test_images.npy')
                Ytest = data_io.read('../data/MNIST/test_labels.npy')
                print("check : {}".format(Ytrain.shape))

                Xtest = scale(Xtest)
                Xtest = np.reshape(Xtest, [Xtest.shape[0], 28, 28, 1])
                Xtest = np.pad(Xtest, ((0, 0), (2, 2), (2, 2), (0, 0)),
                               'constant',
                               constant_values=(-1., ))
                Ix = Ytest[:, 0].astype(int)
                Ytest = np.zeros([Xtest.shape[0], np.unique(Ytest).size])
                Ytest[np.arange(Ytest.shape[0]), Ix] = 1
                print(occdir)

                #                 DNN = model_io.read('../models/MNIST/LeNet-5.nn')

                DNN = modules.Sequential([
                    modules.Convolution(filtersize=(5, 5, 1, 10), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(5, 5, 10, 25), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(4, 4, 25, 100), stride=(1, 1)),
                    modules.Rect(),
                    modules.SumPool(pool=(2, 2), stride=(2, 2)),
                    modules.Convolution(filtersize=(1, 1, 100, 10), stride=(1, 1)),
                    modules.Flatten()
                ])
                print("training...")
                DNN.train(X=Xtrain,
                          Y=Ytrain,
                          Xval=Xtest,
                          Yval=Ytest,
                          iters=10**5,
                          lrate=0.0001,
                          # status=2,
                          batchsize=128)
                #                 ypred = DNN.forward(Xtest)

                acc = np.mean(
                    np.argmax(DNN.forward(Xtest), axis=1) == np.argmax(Ytest,
                                                                       axis=1))
                del DNN
                print('metric model test accuracy is: {:0.4f}'.format(acc))

                res.append(acc)
            print("End of {}:training, accuracy...".format(_))

            ress[v].append(res)
    print("metric...")
    res_mean = {k: np.mean(v, axis=0) for k, v in ress.items()}

    print(res_mean)

    return res_mean
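
For orientation: keep=True writes the occluded datasets to the KAR (keep-and-retrain) directory and keep=False to the ROAR (remove-and-retrain) directory, so typical calls might look like the sketch below (illustrative invocations only):

    # Illustrative usage, assuming the occluded datasets can be (re)generated:
    kar_scores = roar_kar(keep=True)              # keep top-attributed pixels, retrain
    roar_scores = roar_kar(keep=False)            # remove top-attributed pixels, retrain
    baseline = roar_kar(keep=False, random=True)  # random-occlusion baseline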
Example No. 9
def flatten(layer):
    module = modules.Flatten()
    return module, None
Example No. 10
def globalaveragepooling2D(layer):
    h, w = layer.input_shape[1:3]
    module = modules.AveragePool(pool=(h, w), stride=(1, 1))
    return module, modules.Flatten()
Example No. 11
    # rescale pixel values from [0, 255] to [-1, 1] to match the input range the model expects
    Xtrain = Xtrain / 127.5 - 1
    Xtest = Xtest / 127.5 - 1

    # transform numeric class labels into one-hot indicator vectors for uniformity;
    # assumes all classes are present in the label set
    I = Ytrain[:,0].astype(int)
    Ytrain = np.zeros([Xtrain.shape[0],np.unique(Ytrain).size])
    Ytrain[np.arange(Ytrain.shape[0]),I] = 1

    I = Ytest[:,0].astype(int)
    Ytest = np.zeros([Xtest.shape[0],np.unique(Ytest).size])
    Ytest[np.arange(Ytest.shape[0]),I] = 1

    nn = modules.Sequential(
        [
            modules.Flatten(),
            modules.Linear(784, 1296),
            modules.Rect(),
            modules.Linear(1296,1296),
            modules.Rect(),
            modules.Linear(1296,1296),
            modules.Rect(),
            modules.Linear(1296, 10),
            modules.SoftMax()
        ]
    )
    
    nn.train(Xtrain, Ytrain, Xtest, Ytest, batchsize=64, iters=35000, status=1000)
    acc = np.mean(np.argmax(nn.forward(Xtest), axis=1) == np.argmax(Ytest, axis=1))
    if np is not numpy:  # np is cupy; convert the accuracy back to a NumPy scalar
        acc = np.asnumpy(acc)
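
The indicator conversion above relies on every class being present in the label set (np.unique gives the number of columns). A standalone toy version of that transform, with made-up labels:

    import numpy as np

    # Toy version of the one-hot (indicator) conversion used above.
    Y = np.array([[2], [0], [1], [2]])            # numeric class labels, shape (N, 1)
    I = Y[:, 0].astype(int)
    Y_onehot = np.zeros([Y.shape[0], np.unique(Y).size])
    Y_onehot[np.arange(Y.shape[0]), I] = 1
    # Y_onehot -> [[0,0,1], [1,0,0], [0,1,0], [0,0,1]]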