Example #1
    def mux_signal(signals,
                   center_freq=None,
                   relative_freq=None,
                   wdm_comb_config=None):
        if signals[0].is_on_cuda:
            import cupy as np
        else:
            import numpy as np

        freqs = np.array([signal.freq for signal in signals])

        fs = np.array([signal.fs_in_fiber for signal in signals])
        if hasattr(np, 'asnumpy'):
            fs = np.asnumpy(fs)

        if not np.all(np.diff(fs) == 0):
            print(np.diff(fs))
            raise Exception('fs_in_fiber of the signal must be the same')

        length = np.array([len(signal) for signal in signals])

        if relative_freq is None:
            relative_freq = np.array(freqs) - (np.max(freqs) +
                                               np.min(freqs)) / 2
            wdm_comb_config = np.arange(len(signals))
            center_freq = (np.max(freqs) + np.min(freqs)) / 2
            if hasattr(np, 'asnumpy'):
                center_freq = np.asnumpy(center_freq)
                relative_freq = np.asnumpy(relative_freq)
                wdm_comb_config = np.asnumpy(wdm_comb_config)

        else:
            assert center_freq is not None
            assert wdm_comb_config is not None

        max_length = np.max(length)
        df = fs[0] / max_length

        wdm_samples = 0

        for idx, signal in enumerate(signals):
            freq_samples = np.fft.fft(signal[:], n=max_length, axis=-1)
            yidong_dianshu = relative_freq[idx] / df  # yidong_dianshu ("number of points to shift"): FFT bins to roll by
            yidong_dianshu = np.ceil(yidong_dianshu)
            yidong_dianshu = int(yidong_dianshu)  # np.int was removed from NumPy; use the builtin
            freq_samples = np.roll(freq_samples, yidong_dianshu, axis=-1)

            wdm_samples += freq_samples
        symbols = [signal.symbol for signal in signals]
        wdm_signal = WdmSignal(symbols,
                               np.fft.ifft(wdm_samples, axis=-1),
                               relative_freq,
                               signals[0].is_on_cuda,
                               fs_in_fiber=fs[0],
                               center_freq=center_freq)
        wdm_signal.wdm_comb_config = wdm_comb_config
        wdm_signal.baudrates = [signal.baudrate for signal in signals]
        wdm_signal.qam_orders = [signal.qam_order for signal in signals]
        return wdm_signal
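Nearly every example on this page guards `asnumpy` behind a CuPy check. A minimal standalone sketch of that dispatch pattern (a hypothetical helper, not part of the code above):

def to_host(arr):
    """Return a NumPy ndarray whether `arr` lives on the GPU or not."""
    try:
        import cupy
        if isinstance(arr, cupy.ndarray):
            return cupy.asnumpy(arr)  # explicit device-to-host copy
    except ImportError:
        pass  # CuPy not installed: `arr` must already be a host array
    return arr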
Example #2
def get_best_image_th(df, embeddings):
    KNN = 50
    model = NearestNeighbors(n_neighbors=KNN)
    model.fit(embeddings)
    distances, indices = model.kneighbors(embeddings)

    thresholds = list(np.arange(0.6, 0.9, 0.02))
    bar = tqdm(thresholds)
    scores = []

    for th in bar:
        preds = []
        for dist, idx in zip(distances, indices):
            posting_ids = df.iloc[np.asnumpy(idx[dist < th])].posting_id.values
            preds.append(posting_ids)

        df['tmp'] = preds
        df['f1'] = df.apply(getMetric('tmp'), axis=1)
        score = df.f1.mean()
        scores.append(score)
        bar.set_description(
            f"Threshold: {th:.4f} - Score: {score:.4f} Best threshold: {thresholds[np.argmax(scores)]:.4f} - Score: {max(scores):.4f}"
        )

    return thresholds[np.argmax(scores)]
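Example #2 relies on a `getMetric` helper that scores each row's predicted matches; it is not shown here. A plausible sketch of a row-wise F1 score (hypothetical: it assumes the DataFrame has a `target` column holding the true posting_ids):

import numpy as np

def getMetric(col):
    # F1 between the predicted posting_ids in `col` and the ground truth
    # in `row.target`; both are arrays of ids.
    def f1score(row):
        n = len(np.intersect1d(row.target, row[col]))
        return 2 * n / (len(row.target) + len(row[col]))
    return f1score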
Example #3
def block_toep2_sym(a):
    '''generate full representation of a 2-level symmetric Toeplitz matrix

    Args:
        a (ndarray): first column of the symmetric Toeplitz matrix in proper shape.
    Returns:
        Full filled Toeplitz matrix.
    '''
    if use_gpu > 0:
        import cupy
        xp = cupy.get_array_module(a)
        if xp is cupy:
            a = xp.asnumpy(a)
    else:
        xp = np
        a = np.asarray(a)  # plain NumPy has no asnumpy; asarray keeps this path working
        
    A1 = []
    n0,n1 = a.shape
    for i in range(n1):
        A1.append(splin.toeplitz(a[:,i]))
    A = np.empty((n1,n0,n1,n0))
    for i in range(n1):
        for j in range(n1):
            A[i,:,j,:] = A1[int(np.abs(i-j))]
    A.shape = (n0*n1,n0*n1)
    A = xp.asarray(A)
    
    return(A)
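A usage sketch for `block_toep2_sym`, assuming the enclosing module defines `use_gpu = 0`, `np` as NumPy, and `splin` as `scipy.linalg`:

import numpy as np

a = np.arange(6, dtype=float).reshape(3, 2)  # first column, shape (n0, n1)
A = block_toep2_sym(a)                       # full (n0*n1, n0*n1) matrix
assert A.shape == (6, 6)
assert np.allclose(A, A.T)                   # symmetric by construction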
Example #4
def cd_compensation(signal, spans, inplace=False):
    '''

    This function performs chromatic dispersion compensation in the frequency domain.
    The signal is a Signal object, and a new sample array is created from the property data_sample_in_fiber.

    :param signal: Signal object
    :param spans: Span object or list of Span objects the signal has propagated through
    :param inplace: if True, the compensated sample replaces the original sample in signal; otherwise a new ndarray is returned

    :return: if inplace is True, the signal object is returned; if False, the ndarray is returned
    '''
    try:
        import cupy as np
    except Exception:
        import numpy as np


    center_wavelength = signal.center_wavelength
    freq_vector = fftfreq(signal[0, :].shape[0], 1 / signal.fs_in_fiber)
    omeg_vector = 2 * np.pi * freq_vector

    sample = np.array(signal[:])

    if not isinstance(spans, list):
        spans = [spans]

    for span in spans:
        beta2 = -span.beta2(center_wavelength)
        dispersion = (-1j / 2) * beta2 * omeg_vector ** 2 * span.length
        dispersion = np.array(dispersion)
        for pol_index in range(sample.shape[0]):
            sample[pol_index, :] = np.fft.ifft(np.fft.fft(sample[pol_index, :]) * np.exp(dispersion))

    if inplace:
        if hasattr(np,'asnumpy'):
            sample = np.asnumpy(sample)
        signal[:] = sample
        return signal
    else:
        if hasattr(np,'asnumpy'):
            sample = np.asnumpy(sample)
        signal = copy.deepcopy(signal)
        signal[:] = sample
        return signal
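The same frequency-domain trick on a bare complex ndarray, for reference; the parameter values are hypothetical and the sign convention mirrors the loop above:

import numpy as np

fs = 80e9                      # sampling rate in fiber [Hz]
beta2 = -2.17e-26              # fiber GVD parameter [s^2/m]
length = 80e3                  # span length [m]
x = np.random.randn(4096) + 1j * np.random.randn(4096)

omega = 2 * np.pi * np.fft.fftfreq(x.size, 1 / fs)
H = np.exp((-1j / 2) * (-beta2) * omega ** 2 * length)  # inverse dispersion phase
y = np.fft.ifft(np.fft.fft(x) * H)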
Example #5
 def wrap(*p, **key):
     (h, w), img = p[0].shape[:2], p[0]
     img = np.asarray(img, dtype=astype)
     tps = {'sample', 'window', 'glob'}
     ftp = fp, tp = {}, {}
     for i in key:
         ftp[i in tps][i] = key[i]
     ssz = tp.get('sample', sample)
     wsz = wsh = wsw = tp.get('window', window)
     gsz = tp.get('glob', glob)
     mar = tp.get('margin', margin)
     info = tp.get('progress', print)
     if isinstance(ssz, tuple): ssz = list(ssz)
     else: ssz = [int(h*ssz), int(w*ssz)]
     # if the image is smaller than the tile size, shrink it (tile included) to an integer multiple of glob
     if wsh>ssz[0]: wsh = ssz[0] = ssz[0]//gsz*gsz
     if wsw>ssz[1]: wsw = ssz[1] = ssz[1]//gsz*gsz
     if ssz!=[h, w]: img = resize(img, ssz)
     if isinstance(mar, float): mar = int(wsz*mar)
     rcs = grid_slice(*ssz, wsh, wsw, mar)
     if len(rcs)>1: info(1, len(rcs))
     rst = f(img[rcs[0]], *p[1:], **fp)
     if len(rcs)==1 and ssz!=[h, w]:
         rst = resize(rst, (h,w))
     if len(rcs)==1: return np.asnumpy(rst)
     outshp = img.shape[:2] + rst.shape[2:]
     weights = np.zeros(rst.shape[:2], dtype='uint16')
     if rst.ndim==3: weights = weights[:,:,None]
     weights += mar + 1
     for i in range(mar, 0, -1):
         weights[i-1,:] = weights[-i,:] = i
         weights[:,i-1] = weights[:,-i] = i
     rst *= weights
     buf = np.zeros(outshp, dtype=rst.dtype)
     count = np.zeros(outshp[:2], dtype='uint16')
     if rst.ndim==3: count = count[:,:,None]
     buf[rcs[0]] = rst; count[rcs[0]] += weights
     for i in range(1, len(rcs)):
         info(i+1, len(rcs))
         rst = f(img[rcs[i]], *p[1:], **fp)
         rst *= weights
         buf[rcs[i]] += rst; count[rcs[i]] += weights
     np.divide(buf, count, out=buf, casting='unsafe')
     if ssz!=[h, w]: buf = resize(buf, (h,w))  # ssz is a list; comparing to a tuple never matched
     return np.asnumpy(buf)
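The border weighting used above tapers tile edges so overlapping tiles cross-fade instead of leaving seams. The same scheme in isolation (standalone sketch):

import numpy as np

def edge_weights(h, w, mar):
    wts = np.full((h, w), mar + 1, dtype='uint16')  # interior weight
    for i in range(mar, 0, -1):                     # taper to 1 at the borders
        wts[i - 1, :] = wts[-i, :] = i
        wts[:, i - 1] = wts[:, -i] = i
    return wts

print(edge_weights(5, 5, 2))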
Example #6
def test():
    import matplotlib.pyplot as plt
    from imageio import imread
    img = imread(root+'/bus.jpg')
    img = np.asnumpy(img)
    rst = recognize(img)
    plt.imshow(img)
    plt.title(rst)
    plt.show()
Example #7
    def get_test_acc(methods, targets, test_acc):
        for m in methods:
            for target in targets:
                model_name = '{}_{}'.format('simple_cnn', target)

                acc = np.mean(np.argmax(nn.forward(X), axis=1) == np.argmax(Y, axis=1))
                # take the mean of the top values and run argmax predictions; `forward` was coded in PyTorch.
                if not np == numpy: # np=cupy
                    acc = np.asnumpy(acc)
                test_acc[target][m].append(acc)
        return test_acc
Example #8
def HSVStretching(sceneRadiance):
    
    sceneRadiance = cp.uint8(sceneRadiance)
    img_hsv = rgb2hsv(sceneRadiance)
    img_hsv = cp.array(img_hsv)
    labArray = global_stretching(img_hsv,height, width)
    labArray= cp.asnumpy(labArray)
    img_rgb = hsv2rgb(labArray) * 255
    #print(img_rgb)

    return img_rgb
Example #9
    def generateImages(self, noise, epoch):
        generatedImgs = self.generator.feedforward(noise)
        if np.__name__ == "cupy":
            generatedImgs = np.asnumpy(generatedImgs)
        plt.figure(figsize=(10, 10))
        plt.title(f"Epoch {epoch}")

        for i in range(10):
            plt.subplot(10, 10, i + 1)
            plt.imshow(generatedImgs[:, i].reshape((28, 28)), cmap='gray')
            plt.axis('off')
        plt.savefig(f"ganImages/{epoch}.png")
Example #10
def block_circ(a):
    '''generate full representation of a 2-level circulant matrix

    Args:
        a (ndarray): first column of the circulant matrix in proper shape.
    Returns:
        Full filled circulant matrix.
    '''
    if use_gpu > 0:
        import cupy
        xp = cupy.get_array_module(a)
        if xp is cupy:
            a = xp.asnumpy(a)
    else:
        xp = np
        a = np.asarray(a)  # plain NumPy has no asnumpy; asarray keeps this path working
            
    if a.ndim == 1:
        return splin.circulant(a)
    n_total = np.prod(a.shape)
    a_shape = a.shape
    A = np.zeros(np.hstack([np.array(a.shape),np.array(a.shape)]))
    A_shape = A.shape
    x_slice = [0]*a.ndim
    x_slice[-1] = slice(None)
    x_target_slice = [0]*a.ndim
    x_target_slice[-1] = slice(None)
    y_slice = [slice(None)]*2
    a = a.reshape(-1,a_shape[-1])
    A = A.reshape(-1,a_shape[-1],*a_shape)
    for i,sub_column in enumerate(a):
        y_slice[0] = i
        A[tuple(y_slice+x_slice)] = splin.circulant(sub_column)
    for ilevel in range(len(a_shape)-1):
        A = A.reshape(-1,*a_shape[len(a_shape)-ilevel-2:],*a_shape)
        y_slice = [slice(None)]*(ilevel+3)
        y_target_slice = [slice(None)]*(ilevel+3)
        for k in range(A.shape[0]):
            y_slice[0] = k
            y_target_slice[0] = k
            for i in range(a_shape[len(a_shape)-ilevel-2]):
                y_slice[1] = i
                for j in range(1,a_shape[len(a_shape)-ilevel-2]):
                    x_slice[len(a_shape)-ilevel-2] = j
                    y_target_slice[1] = np.mod(i-j,a_shape[len(a_shape)-ilevel-2])
                    A[tuple(y_slice+x_slice)] = A[tuple(y_target_slice+x_target_slice)]
        x_slice[len(a_shape)-ilevel-2] = slice(None)
        x_target_slice[len(a_shape)-ilevel-2] =slice(None)
    A = A.reshape(A_shape)
    return A
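A small sanity check for `block_circ` (assumes `use_gpu = 0`, `np` as NumPy, and `splin` as `scipy.linalg` in the enclosing module): a 1-D input reduces to an ordinary circulant matrix.

import numpy as np
import scipy.linalg as splin

c = np.array([1., 2., 3.])
assert np.array_equal(block_circ(c), splin.circulant(c))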
Example #11
def _non_maximum_suppression_gpu(bbox, thresh, score=None, limit=None):
    if len(bbox) == 0:
        return cp.zeros((0, ), dtype=np.int32)

    n_bbox = bbox.shape[0]

    if score is not None:
        order = score.argsort()[::-1].astype(np.int32)
    else:
        order = cp.arange(n_bbox, dtype=np.int32)

    sorted_bbox = bbox[order, :]
    selec, n_selec = _call_nms_kernel(sorted_bbox, thresh)
    selec = selec[:n_selec]
    selec = order[selec]
    if limit is not None:
        selec = selec[:limit]
    return cp.asnumpy(selec)
Example #12
def pointInside_Tetrahedron(point, tetra, origin, use_cupy=False):
    """
    Takes a single point or array of points, as well as tetra and origin objects returned by 
    the Tetrahedron function.
    Returns a boolean or boolean array indicating whether the point is inside the tetrahedron.
    """
    if use_cupy:
        import cupy as np
    else:
        import numpy as np
    tetra = np.array(tetra)
    origin = np.array(origin)
    point = np.array(point)

    newp = np.matmul(tetra, (point - origin).T).T
    mask = np.all(newp >= 0, axis=-1) & np.all(
        newp <= 1, axis=-1) & (np.sum(newp, axis=-1) <= 1)
    if use_cupy:
        return np.asnumpy(mask)
    return mask
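A NumPy-only usage sketch (use_cupy=False). It assumes, consistent with the docstring, that `Tetrahedron` returns the inverse of the edge-vector matrix as `tetra` and one vertex as `origin`:

import numpy as np

verts = np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.], [0., 0., 1.]])
origin = verts[0]
tetra = np.linalg.inv((verts[1:] - origin).T)  # maps offsets to edge coordinates

print(pointInside_Tetrahedron(np.array([0.1, 0.1, 0.1]), tetra, origin))  # True
print(pointInside_Tetrahedron(np.array([1.0, 1.0, 1.0]), tetra, origin))  # False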
Example #13
                    print("{:.2f}".format(
                        100 * full / (resolution * resolution * resolution)))
                    full += 1

        XY = np.array(xy)
        XY.shape = (resolution, resolution)

        XZ = np.array(xz)
        XZ.shape = (resolution, resolution)

        YZ = np.array(yz)
        YZ.shape = (resolution, resolution)

        try:
            XY = np.asnumpy(XY)
            XZ = np.asnumpy(XZ)
            YZ = np.asnumpy(YZ)
        except AttributeError:
            pass  # plain NumPy has no asnumpy; arrays are already host-side

        XY[0][0] = 0
        XY[0][1] = 1

        XZ[0][0] = 0
        XZ[0][1] = 1

        YZ[0][0] = 0
        YZ[0][1] = 1

        fig, axs = plt.subplots(2, 2)
Example #14
def learning(model, optimizer, n_epoch=20, batchsize=100):
    # lists for logging results
    train_loss_list = []
    train_acc_list = []
    test_loss_list = []
    test_acc_list = []

    for epoch in tqdm(range(n_epoch)):
        print('epoch {} : '.format(epoch + 1), end="")

        # training
        sum_loss = 0
        pred_y = []
        perm = np.random.permutation(X_train.shape[0])  # randomly shuffle the training data

        for i in range(0, X_train.shape[0], batchsize):
            x = X_train[perm[i:i + batchsize]]
            t = Y_train[perm[i:i + batchsize]]

            loss = model.forward(x, t)
            model.backward()
            optimizer.update()

            sum_loss += loss * len(x)

            pred_y.extend(np.argmax(model.y, axis=1).tolist())

        loss = sum_loss / X_train.shape[0]

        # accuracy: convert predictions to one-hot; the sum of the elementwise product with the labels counts correct answers.
        accuracy = np.sum(
            np.eye(10)[pred_y] * Y_train[perm]) / X_train.shape[0]
        if gpu:
            accuracy = np.asnumpy(accuracy)
        print('Train loss {:.3f}, Train accuracy {:.4f} | '.format(
            float(loss), accuracy),
              end="")
        train_loss_list.append(float(loss))
        train_acc_list.append(accuracy)

        # test
        sum_loss = 0

        pred_y = []
        for i in range(0, X_test.shape[0], batchsize):
            x = X_test[i:i + batchsize]
            t = Y_test[i:i + batchsize]

            sum_loss += model.forward(x, t, train_config=False) * len(x)
            pred_y.extend(np.argmax(model.y, axis=1).tolist())
        loss = sum_loss / X_test.shape[0]

        accuracy = np.sum(np.eye(10)[pred_y] * Y_test) / X_test.shape[0]
        if gpu:
            accuracy = np.asnumpy(accuracy)
        print('Test loss {:.3f}, Test accuracy {:.4f}'.format(
            float(loss), accuracy))
        test_loss_list.append(float(loss))
        test_acc_list.append(accuracy)

    return train_loss_list, train_acc_list, test_loss_list, test_acc_list
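The one-hot accuracy trick from the comment above, in isolation: `np.eye(10)[pred]` turns class indices into one-hot rows, and the elementwise product with one-hot labels is nonzero exactly where a prediction is correct.

import numpy as np

pred = [2, 0, 1]                   # predicted classes
labels = np.eye(3)[[2, 0, 2]]      # one-hot labels for true classes 2, 0, 2
acc = np.sum(np.eye(3)[pred] * labels) / len(pred)
print(acc)                         # 0.666...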
Example #15
# imports
import model_io
import data_io
import render

import importlib.util as imp
import numpy
import numpy as np
if imp.find_spec("cupy"):  #use cupy for GPU support if available
    import cupy
    import cupy as np
na = np.newaxis
# end of imports

nn = model_io.read('../models/MNIST/LeNet-5.nn')  # read model
X = data_io.read('../data/MNIST/test_images.npy')[
    na, 0, :]  # load first MNIST test image
X = X / 127.5 - 1  # normalized data to range [-1 1]

Ypred = nn.forward(X)  # forward pass through network
R = nn.lrp(Ypred)  # lrp to explain prediction of X

if not np == numpy:  # np=cupy
    X = np.asnumpy(X)
    R = np.asnumpy(R)

# render rgb images and save as image
digit = render.digit_to_rgb(X)
hm = render.hm_to_rgb(R, X)  # render heatmap R, use X as outline
render.save_image([digit, hm], '../2nd_py.png')
Example #16
def occlude_dataset(DNN, attribution, percentiles, test=False, keep=False, random=False, batch_size= 128, savedir=''):
    '''
    Compute LRP relevance scores for XAI.
    percentile: masking percent
    test: whether to run on the test set only
    keep: argument selecting KAR vs. ROAR
    '''
    
    print("Condition of test : {}".format(test))
    if test:
        Xs = Xtest
        ys = Ytest
    else:
        Xs = Xtrain
        ys = Ytrain
    
    print("initial batch_size is : {}".format(batch_size))
    total_batch = math.ceil(len(Xs) / batch_size)
    print("batch size is :{}".format(total_batch))
    hmaps = []
    data = []
    label = []
    
    ## Compute relevance scores
    for i in tqdm(range(total_batch)):
        if 'LRP' in attribution:
            x = Xs[i:i+1,...]
            y = ys[i:i+1,...]
            ypred = DNN.forward(x)

            m = np.zeros_like(ypred)
            m[:,np.argmax(ypred)] = 1
            Rinit = ypred*m
            Rinit = Rinit.astype(np.float64)  # np.float was removed from NumPy; keep the cast result
            R = DNN.lrp(Rinit,'epsilon',1.)
            R = R.sum(axis=3)
            if not np == numpy:
                R = np.asnumpy(R)
            if test:
                LRP_test = render.digit_to_rgb(R, scaling = 3)
            attrs = R

        elif 'proposed_method' in attribution:
            x = Xs[i:i+1,...]
            y = ys[i:i+1,...]
            ypred = DNN.forward(x)

            m = np.zeros_like(ypred)
            m[:,np.argmax(ypred)] = 1
            Rinit = ypred*m
            Rinit = Rinit.astype(np.float64)  # np.float was removed from NumPy; keep the cast result
            R = DNN.lrp(Rinit,'epsilon',1.)
            R = R.sum(axis=3)
            if not np == numpy: 
                xs = np.asnumpy(x)
                R = np.asnumpy(R)
            
            ## Run GLS
            xs = x
            tar = xs
            a = np.load('../r_array/convolution.npy')
            a = np.reshape(a,[a.shape[1]*a.shape[2],1])
            b = np.load('../r_array/rect.npy')    
            b = np.pad(b,((0,0),(2,2),(2,2),(0,0)))
            b = np.reshape(b,[b.shape[1]*b.shape[2],b.shape[0]*b.shape[3]])
            c = np.load('../r_array/sumpoll.npy')
            c = np.pad(c,((0,0),(2,2),(2,2),(0,0)))
            c = np.reshape(c,[c.shape[1]*c.shape[2],c.shape[3]])
            new_b = np.hstack((b, c))
            new = np.hstack((a, new_b))
            tar = np.reshape(tar, [tar.shape[0]*tar.shape[1]*tar.shape[2]])
            y_tran = tar.transpose()
            new = sm.add_constant(new)

            model = sm.GLSAR(y_tran, new, rho = 2)
            result = model.iterative_fit(maxiter = 30)
            find = result.resid
            check = np.reshape(find,[1,32,32])
            if test:
                proposed_test = render.digit_to_rgb(check, scaling = 3)
            attrs = check
        else:
            x = Xs[i:i+1,...]
            y = ys[i:i+1,...]
            if not np == numpy:
                xs = np.asnumpy(x)
            xs = x
            if test:
                digit = render.digit_to_rgb(xs, scaling = 3)
            attrs = xs
        attrs += np.random.normal(scale=1e-4, size=attrs.shape)
        hmaps.append(attrs)
        data.append(x)
        label.append(y)
        
    ## Build heatmaps
    print("Interpretation is done, concatenate...")
    hmaps = np.concatenate(hmaps, axis=0)
    data = np.concatenate(data, axis = 0)
    
    print("concatenate is done...")
    print("print final : {}".format(hmaps.shape))

    for percent in tqdm(percentiles):
        batch_attrs = hmaps
        occluded_images = remove(data, batch_attrs, percent, keep)
        print("save start")
        print("Save directory is {}".format(savedir))
        save(occluded_images, savedir + '{}_{}_{}.pickle'.format('test' if test else 'train', attribution, percent))
        save(np.concatenate(label, axis = 0), savedir + '{}_{}_{}_{}.pickle'.format('test' if test else 'train', attribution, percent, 'label'))
        print("Occlude image {} percentile...".format(percent))
Example #17
nn.drop_softmax_output_layer()  # drop softmax output layer for analyses

X = data_io.read('../data/MNIST/test_images.npy')
Y = data_io.read('../data/MNIST/test_labels.npy')

# transfer pixel values from [0 255] to [-1 1] to satisfy the expected input / training paradigm of the model
X = X / 127.5 - 1

# transform numeric class labels to vector indicator for uniformity. assume presence of all classes within the label set
I = Y[:, 0].astype(int)
Y = np.zeros([X.shape[0], np.unique(Y).size])
Y[np.arange(Y.shape[0]), I] = 1

acc = np.mean(np.argmax(nn.forward(X), axis=1) == np.argmax(Y, axis=1))
if not np == numpy:  # np=cupy
    acc = np.asnumpy(acc)
print('model test accuracy is: {:0.4f}'.format(acc))

#permute data order for demonstration. or not. your choice.
I = np.arange(X.shape[0])
#I = np.random.permutation(I)

#predict and perform LRP for the 10 first samples
for i in I[:10]:
    x = X[na, i, :]

    #forward pass and prediction
    ypred = nn.forward(x)
    print('True Class:     ', np.argmax(Y[i]))
    print('Predicted Class:', np.argmax(ypred), '\n')
Example #18
def dicToNumpy(dic):
    for key in dic.keys():
        if 'cupy' in str(type(dic[key])):
            dic[key] = np.asnumpy(dic[key])
    return dic
Example #19
def dicToDF(dic):
    for key in dic.keys():
        if 'cupy' in str(type(dic[key])):
            dic[key] = np.asnumpy(dic[key])
    return pd.DataFrame(dic)
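A quick usage sketch for the two converters above; it assumes CuPy is installed and, as in their module, `np` is bound to CuPy and `pd` to pandas:

import cupy as np
import pandas as pd

dic = {'score': np.arange(3, dtype=np.float32), 'name': ['a', 'b', 'c']}
df = dicToDF(dic)  # CuPy values are moved to the host before the DataFrame is built
print(df.dtypes)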
Example #20
def occlude_dataset(DNN,
                    attribution,
                    percentiles,
                    test=False,
                    keep=False,
                    random=False,
                    batch_size=128,
                    savedir=''):
    print("Condition of test : {}".format(test))
    if test:
        Xs = Xtest
        ys = Ytest
    else:
        Xs = Xtrain
        ys = Ytrain

    print("initial batch_size is : {}".format(batch_size))
    total_batch = math.ceil(len(Xs) / batch_size)
    print("batch size is :{}".format(total_batch))
    hmaps = []
    data = []
    label = []
    for i in tqdm(range(total_batch)):

        #         batch_xs = Xs[i*batch_size:(i+1)*batch_size]
        # #         batch_xs_scaled = scale(batch_xs)
        if 'LRP' in attribution:
            #                 for t in It[:10]:
            x = Xs[i:i + 1, ...]
            y = ys[i:i + 1, ...]
            ypred = DNN.forward(x)
            #                 print('True Class:     ', np.argmax(ys[i]))
            #                 print('Predicted Class:', np.argmax(ypred),'\n')
            m = np.zeros_like(ypred)
            m[:, np.argmax(ypred)] = 1
            Rinit = ypred * m
            Rinit = Rinit.astype(np.float64)  # np.float was removed from NumPy; keep the cast result
            R = DNN.lrp(Rinit, 'epsilon', 1.)
            R = R.sum(axis=3)
            if not np == numpy:
                R = np.asnumpy(R)
            if test:
                LRP_test = render.digit_to_rgb(R, scaling=3)
            attrs = R
#                 attrs = np.sum(np.where(attrs > 0, attrs, 0.0), axis=-1)
#                 print("print lrp : {}".format(attrs.shape))
        elif 'proposed_method' in attribution:
            #                 for t in It[:10]:
            x = Xs[i:i + 1, ...]
            y = ys[i:i + 1, ...]
            ypred = DNN.forward(x)
            #                 print('True Class:     ', np.argmax(ys[i]))
            #                 print('Predicted Class:', np.argmax(ypred),'\n')
            m = np.zeros_like(ypred)
            m[:, np.argmax(ypred)] = 1
            Rinit = ypred * m
            Rinit = Rinit.astype(np.float64)  # np.float was removed from NumPy; keep the cast result
            R = DNN.lrp(Rinit, 'epsilon', 1.)
            R = R.sum(axis=3)
            if not np == numpy:
                xs = np.asnumpy(x)
                R = np.asnumpy(R)
            xs = x
            tar = xs
            a = np.load('../r_array/convolution.npy')
            a = np.reshape(a, [a.shape[1] * a.shape[2], 1])
            b = np.load('../r_array/rect.npy')
            b = np.pad(b, ((0, 0), (2, 2), (2, 2), (0, 0)))
            b = np.reshape(b,
                           [b.shape[1] * b.shape[2], b.shape[0] * b.shape[3]])
            c = np.load('../r_array/sumpoll.npy')
            c = np.pad(c, ((0, 0), (2, 2), (2, 2), (0, 0)))
            c = np.reshape(c, [c.shape[1] * c.shape[2], c.shape[3]])
            new_b = np.hstack((b, c))
            new = np.hstack((a, new_b))
            tar = np.reshape(tar, [tar.shape[0] * tar.shape[1] * tar.shape[2]])
            y_tran = tar.transpose()
            new = sm.add_constant(new)
            #             print(new.shape)
            #             print(y_tran.shape)
            model = sm.GLSAR(y_tran, new, rho=2)
            result = model.iterative_fit(maxiter=30)
            find = result.resid
            check = np.reshape(find, [1, 32, 32])
            if test:
                proposed_test = render.digit_to_rgb(check, scaling=3)
            attrs = check
#                 attrs = np.sum(np.where(attrs > 0, attrs, 0.0), axis=-1)
#                 print("print propose : {}".format(attrs.shape))
        else:
            x = Xs[i:i + 1, ...]
            y = ys[i:i + 1, ...]
            if not np == numpy:
                xs = np.asnumpy(x)
            xs = x
            if test:
                digit = render.digit_to_rgb(xs, scaling=3)
            attrs = xs
#                 attrs = np.sum(np.where(attrs > 0, attrs, 0.0), axis=-1)
#                 print("print normal : {}".format(attrs.shape))
        attrs += np.random.normal(scale=1e-4, size=attrs.shape)
        #         print("print random normal : {}".format(attrs.shape))
        hmaps.append(attrs)
        data.append(x)
        label.append(y)


#         print("print final : {}".format(len(hmaps)))
    print("Interpretation is done, concatenate...")
    hmaps = np.concatenate(hmaps, axis=0)
    data = np.concatenate(data, axis=0)

    print("concatenate is done...")
    print("print final : {}".format(hmaps.shape))
    #     percentiles = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90]
    for percent in tqdm(percentiles):

        #         dataset = []
        #         y_target = []
        #         for i in It[:10]:

        #             batch_xs, batch_ys = Xs[i*batch_size:(i+1)*batch_size], ys[i*batch_size:(i+1)*batch_size]
        #             x = Xs[i:i+1,...]
        #             y = ys[i:i+1,...]
        # batch_attrs = hmaps[i:i+1,...]
        batch_attrs = hmaps
        occluded_images = remove(data, batch_attrs, percent, keep)

        #             dataset.append(scale(occluded_images))
        #             y_target.append(y)
        #             del occluded_images
        #
        print("save start")
        #         print("dataset shape : {}".format(dataset))
        print("Save directory is {}".format(savedir))
        save(
            occluded_images, savedir + '{}_{}_{}.pickle'.format(
                'test' if test else 'train', attribution, percent))

        #         save(np.concatenate(dataset, axis=0), savedir + '{}_{}_{}.pickle'.format('test' if test else 'train', attribution, percent))
        #         save(np.concatenate(y_target, axis=0), savedir + '{}_{}_{}_{}.pickle'.format('test' if test else 'train', attribution, percent, 'label'))
        save(
            np.concatenate(label, axis=0),
            savedir + '{}_{}_{}_{}.pickle'.format(
                'test' if test else 'train', attribution, percent, 'label'))
        print("Occlude image {} percentile...".format(percent))
Example #21
def asnumpy(xparr):
    if 'cupy' in str(type(xparr)):
        return np.asnumpy(xparr)  # cupy to numpy
    return xparr  # do nothing
Example #22
def asnumpy(A):
    # assumes `numpy` here is bound to CuPy; plain NumPy has no asnumpy
    return numpy.asnumpy(A)


#----------------------------------------------------------------------------------------------------------------------
Example #23
    v5 = v5 * alpha + learing_rate_G * grad_G_w2
    v6 = v6 * alpha + learing_rate_G * grad_G_w3
    v7 = v7 * alpha + learing_rate_G * grad_G_w4

    G_w1 = G_w1 - v4
    G_w2 = G_w2 - v5
    G_w3 = G_w3 - v6
    G_w4 = G_w4 - v7

    if iter%200 == 0:
        current_gen = np.random.uniform(-1., 1., size=[16, 100])

        gl1 = current_gen.dot(G_w1)
        gl1A = arctan(gl1)

        gl2 = gl1A.dot(G_w2)
        gl2A = ReLu(gl2)

        gl3 = gl2A.dot(G_w3)
        gl3A = ReLu(gl3)

        gl4 = gl3A.dot(G_w4)
        gl4A = log(gl4)
    
        fig = plot(np.asnumpy(gl4A))
        plt.savefig('out/{}.png'.format(str(iter).zfill(3)), bbox_inches='tight')
        plt.close(fig)



# -- end code ---
Example #24
def occlude_dataset(DNN, attribution, percentiles, test=False, keep=False, random=False, batch_size= 64, savedir=''):
    print("Condition of test : {}".format(test))
    if test:
        Xs = Xtest
        ys = Ytest
    else:
        Xs = Xtrain
        ys = Ytrain
    
    print("initial batch_size is : {}".format(batch_size))
    total_batch = math.ceil(len(Xs) / batch_size)
    print("batch size is :{}".format(total_batch))
    # one (occluded images, raw data, labels) bucket per masking percentile
    valid_percents = (0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1)
    buckets = {p: ([], [], []) for p in percentiles}
    for i in tqdm(range(total_batch)):
        if 'LRP' in attribution:
            x = Xs[i:i+1,...]
            y = ys[i:i+1,...]
            ypred = DNN.forward(x)
            m = np.zeros_like(ypred)
            m[:,np.argmax(ypred)] = 1
            Rinit = ypred*m
            Rinit = Rinit.astype(np.float64)  # np.float was removed from NumPy; keep the cast result
            R = DNN.lrp(Rinit,'epsilon',1.)
            R = R.sum(axis=3)
            if not np == numpy:
                R = np.asnumpy(R)
            
            attrs = R
            data = x
            
            attrs = scaling(attrs)
            attrs *= 255
            attrs = attrs.astype(np.uint8)
            attrs = scale(attrs)



        elif 'proposed_method' in attribution:
            x = Xs[i:i+1,...]
            y = ys[i:i+1,...]
            ypred = DNN.forward(x)

            m = np.zeros_like(ypred)
            m[:,np.argmax(ypred)] = 1
            Rinit = ypred*m
            Rinit = Rinit.astype(np.float64)  # np.float was removed from NumPy; keep the cast result
            R = DNN.lrp(Rinit,'epsilon',1.)
            R = R.sum(axis=3)
            if not np == numpy: 
                xs = np.asnumpy(x)
                R = np.asnumpy(R)
            xs = x
            tar = xs
            a = np.load('../r_array/convolution.npy')
            a = np.reshape(a,[a.shape[1]*a.shape[2],1])
            b = np.load('../r_array/rect.npy')    
            b = np.pad(b,((0,0),(2,2),(2,2),(0,0)))
            b = np.reshape(b,[b.shape[1]*b.shape[2],b.shape[0]*b.shape[3]])
            c = np.load('../r_array/sumpoll.npy')
            c = np.pad(c,((0,0),(2,2),(2,2),(0,0)))
            c = np.reshape(c,[c.shape[1]*c.shape[2],c.shape[3]])
            new_b = np.hstack((b, c))
            new = np.hstack((a, new_b))
            tar = np.reshape(tar, [tar.shape[0]*tar.shape[1]*tar.shape[2]])
            y_tran = tar.transpose()
            new = sm.add_constant(new)

            model = sm.GLSAR(y_tran, new, rho = 2)
            result = model.iterative_fit()
            find = result.resid
            check = np.reshape(find,[1,32,32])
            
            attrs = check
            data = x
            
            attrs = scaling(attrs)
            attrs *= 255
            attrs = attrs.astype(np.uint8)
            attrs = scale(attrs)

            
        else:
            x = Xs[i:i+1,...]
            y = ys[i:i+1,...]
            if not np == numpy:
                xs = np.asnumpy(x)
            xs = x
            
            attrs = xs
            data = x
            
#             attrs = scaling(attrs)
            attrs *= 255
            attrs = attrs.astype(np.uint8)
            attrs = scale(attrs)


        for percent in tqdm(percentiles):
            if percent not in valid_percents:
                raise ValueError("unexpected percentile: {}".format(percent))
            # occlude, render a check image, and route the results into the
            # bucket for this percentile
            if attribution == 'normal':
                occluded_images = random_remove(data, percent, keep)
            else:
                print(" percent : {}".format(percent))
                occluded_images = remove(data, attrs, percent, keep)
            raw_image = render.digit_to_rgb(data, scaling = 1)
            prdigit = render.digit_to_rgb(occluded_images, scaling = 1)
            test_image = render.save_image([raw_image, prdigit],'../{}_KAR_check_point_{}.png'.format(attribution, percent))
            occ, dat, lab = buckets[percent]
            occ.append(occluded_images)
            dat.append(data)
            lab.append(y)

        
        
        
    print("Interpretation is done, concatenate...")
    buckets = {p: tuple(np.concatenate(part, axis=0) for part in parts)
               for p, parts in buckets.items()}
    print("concatenate is done...")
    for percent in tqdm(percentiles):
        occluded, _, labels = buckets[percent]
        save(occluded, savedir + '{}_{}_{}.pickle'.format('test' if test else 'train', attribution, percent))
        save(labels, savedir + '{}_{}_{}_{}.pickle'.format('test' if test else 'train', attribution, percent, 'label'))
        print("Occlude image {} percentile...".format(percent))
    del buckets
Example #25
    #this is the XOR problem.
    X = np.random.rand(N,D) #we want [NxD] data
    X = (X > 0.5)*1.0
    Y = X[:,0] == X[:,1]
    Y = (np.vstack((Y, np.invert(Y)))*1.0).T # and [NxC] labels

    X += np.random.randn(N,D)*0.1 # add some noise to the data.

    #build a network
    nn = modules.Sequential([modules.Linear(2,3), modules.Tanh(),modules.Linear(3,15), modules.Tanh(), modules.Linear(15,15), modules.Tanh(), modules.Linear(15,3), modules.Tanh() ,modules.Linear(3,2), modules.SoftMax()])
    #train the network.
    nn.train(X,Y, batchsize = 5, iters=1000)
    acc = np.mean(np.argmax(nn.forward(X), axis=1) == np.argmax(Y, axis=1))
    if not np == numpy: # np=cupy
        acc = np.asnumpy(acc)
    print('model train accuracy is: {:0.4f}'.format(acc))

    #save the network
    model_io.write(nn, '../xor_net_small_1000.txt')

if train_mnist:

    Xtrain = data_io.read('../data/MNIST/train_images.npy')
    Ytrain = data_io.read('../data/MNIST/train_labels.npy')
    Xtest = data_io.read('../data/MNIST/test_images.npy')
    Ytest = data_io.read('../data/MNIST/test_labels.npy')

    # transfer pixel values from [0 255] to [-1 1] to satisfy the expected input / training paradigm of the model
    Xtrain =  Xtrain / 127.5 - 1
    Xtest =  Xtest / 127.5 - 1
Example #26
                   (np.sqrt(v6 / (1 - beta_2)) + eps)) * (m6 / (1 - beta_1))

    G_W2 = G_W2 - (learing_rate /
                   (np.sqrt(v7 / (1 - beta_2)) + eps)) * (m7 / (1 - beta_1))
    G_b2 = G_b2 - (learing_rate /
                   (np.sqrt(v8 / (1 - beta_2)) + eps)) * (m8 / (1 - beta_1))

    # --- Print Error ----
    print("Current Iter: ",
          iter,
          " Current D cost:",
          D_cost,
          " Current G cost: ",
          G_cost,
          end='\r')

    # ---- Print to Out put ----
    if iter % 1000 == 0:
        Z = np.random.uniform(-1., 1., size=[16, G_input])
        Gl1 = Z.dot(G_W1) + G_b1
        Gl1A = ReLu(Gl1)
        Gl2 = Gl1A.dot(G_W2) + G_b2
        current_fake_data = log(Gl2)

        fig = plot(np.asnumpy(current_fake_data))
        plt.savefig('temp_GPU/{}.png'.format(
            str(iter).zfill(3) + "_" + str(G_input) + "_" + str(hidden_input)),
                    bbox_inches='tight')
        plt.close(fig)

# -- end code --
Example #27
    def train(self, X, Y,  Xval = [], Yval = [],  batchsize = 25, iters = 10000, lrate = 0.005, lrate_decay = None, lfactor_initial=1.0 , status = 250, convergence = -1, transform = None):
        '''
        Provides a method for training the neural net (self) based on given data.

        Parameters
        ----------

        X : numpy.ndarray
            the training data, formatted to (N,D) shape, with N being the number of samples and D their dimensionality

        Y : numpy.ndarray
            the training labels, formatted to (N,C) shape, with N being the number of samples and C the number of output classes.

        Xval : numpy.ndarray
            some optional validation data. used to measure network performance during training.
            shaped (M,D)

        Yval : numpy.ndarray
            the validation labels. shaped (M,C)

        batchsize : int
            the batch size to use for training

        iters : int
            max number of training iterations

        lrate : float
            the initial learning rate. the learning rate is adjusted during training with increased model performance. See lrate_decay

        lrate_decay : string
            controls if and how the learning rate is adjusted throughout training:
            'none' or None disables learning rate adaptation. This is the DEFAULT behaviour.
            'sublinear' adjusts the learning rate to lrate*(1-Accuracy**2) during an evaluation step, often resulting in a better performing model.
            'linear' adjusts the learning rate to lrate*(1-Accuracy) during an evaluation step, often resulting in a better performing model.

        lfactor_initial : float
            specifies an initial discount on the given learning rate, e.g. when retraining an established network in combination with a learning rate decay,
            it might be undesirable to use the given learning rate in the beginning. this could have been done better. TODO: do better.
            Default value is 1.0

        status : int
            number of iterations (i.e. number of rounds of batch forward pass, gradient backward pass, parameter update) of silent training
            until status print and evaluation on validation data.

        convergence : int
            number of consecutive allowed status evaluations with no more model improvements until we accept the model has converged.
            Set <=0 to disable. Disabled by DEFAULT.
            Set to any value > 0 to control the maximal consecutive number (status * convergence) iterations allowed without model improvement, until convergence is accepted.

        transform : function handle
            a function taking as an input a batch of training data sized [N,D] and returning a batch sized [N,D] with added noise or other various data transformations. It's up to you!
            default value is None for no transformation.
            expected syntax is, with X.shape == Xt.shape == (N,D)
            def yourFunction(X):
                Xt = someStuff(X)
                return Xt
        '''

        def randperm(N,b):
            '''
            helper method for picking b unique random indices from a range [0,N[.
            we do not use numpy.random.permutation or numpy.random.choice
            due to known severe performance issues with drawing without replacement.
            if the ratio of N/b is high enough, we should see a huge performance gain.

            N : int
                range of indices [0,N[ to choose from.

            b : int
                the number of unique indices to pick.
            '''
            assert(b <= N) # if this fails no valid solution can be found.
            I = numpy.arange(0)
            while I.size < b:
                I = numpy.unique(numpy.append(I,numpy.random.randint(0,N,[b-I.size,])))
            return np.array(I)

        t_start = time.time()
        untilConvergence = convergence;    learningFactor = lfactor_initial
        bestAccuracy = 0.0;                bestLayers = copy.deepcopy(self.modules)
        bestLoss = np.inf;                 bestIter = 0

        N = X.shape[0]
        for d in range(iters):

            #the actual training:
            #first, pick samples at random
            samples = randperm(N,batchsize)

            #transform batch data (maybe)
            if transform == None:
                batch = X[samples,:]
            else:
                batch = transform(X[samples,:])

            #forward and backward propagation steps with parameter update
            Ypred = self.forward(batch)
            self.backward(Ypred - Y[samples,:]) #l1-loss
            self.update(lrate*learningFactor)

            #periodically evaluate network and optionally adjust learning rate or check for convergence.
            if (d+1) % status == 0:
                if not len(Xval) == 0 and not len(Yval) == 0: #if given, evaluate on validation data
                    Ypred = self.forward(Xval)
                    acc = np.mean(np.argmax(Ypred, axis=1) == np.argmax(Yval, axis=1))
                    l1loss = np.abs(Ypred - Yval).sum()/Yval.shape[0]
                    if not np == numpy: acc = np.asnumpy(acc); l1loss = np.asnumpy(l1loss)
                    print('Accuracy after {0} iterations on validation set: {1}% (l1-loss: {2:.4})'.format(d+1, acc*100, l1loss))

                else: #evaluate on the training data only
                    Ypred = self.forward(X)
                    acc = np.mean(np.argmax(Ypred, axis=1) == np.argmax(Y, axis=1))
                    l1loss = np.abs(Ypred - Y).sum()/Y.shape[0]
                    if not numpy == np: acc = np.asnumpy(acc); l1loss = np.asnumpy(l1loss)
                    print('Accuracy after {0} iterations on training data: {1}% (l1-loss: {2:.4})'.format(d+1,acc*100,l1loss))


                #save current network parameters if we have improved
                #if acc >= bestAccuracy and l1loss <= bestLoss:
                # only go by loss
                if l1loss <= bestLoss:
                    print('    New loss-optimal parameter set encountered. saving....')
                    bestAccuracy = acc
                    bestLoss = l1loss
                    bestLayers = copy.deepcopy(self.modules)
                    bestIter = d

                    #adjust learning rate
                    if lrate_decay == None or lrate_decay == 'none':
                        pass # no adjustment
                    elif lrate_decay == 'sublinear':
                        #slow down learning to better converge towards an optimum with increased network performance.
                        learningFactor = 1.-(acc*acc)
                        print('    Adjusting learning rate to {0} ~ {1}% of its initial value'.format(learningFactor*lrate, numpy.round(learningFactor*100,2)))
                    elif lrate_decay == 'linear':
                        #slow down learning to better converge towards an optimum with increased network performance.
                        learningFactor = 1.-acc
                        print('    Adjusting learning rate to {0} ~ {1}% of its initial value'.format(learningFactor*lrate, numpy.round(learningFactor*100,2)))

                    #refresh number of allowed search steps until convergence
                    untilConvergence = convergence
                else:
                    untilConvergence-=1
                    if untilConvergence == 0 and convergence > 0:
                        print('    No more recorded model improvements for {0} evaluations. Accepting model convergence.'.format(convergence))
                        break

                t_elapsed =  time.time() - t_start
                percent_done = float(d+1)/iters #d+1 because we are after the iteration's heavy lifting
                t_remaining_estimated = t_elapsed/percent_done - t_elapsed

                t_m, t_s = divmod(t_remaining_estimated, 60)
                t_h, t_m = divmod(t_m, 60)
                t_d, t_h = divmod(t_h, 24)

                timestring = '{}d {}h {}m {}s'.format(int(t_d), int(t_h), int(t_m), int(t_s))
                print('    Estimate time until current training ends : {} ({:.2f}% done)'.format(timestring, percent_done*100))

            elif (d+1) % (status/10) == 0:
                # print 'alive' signal
                #sys.stdout.write('.')
                l1loss = np.abs(Ypred - Y[samples,:]).sum()/Ypred.shape[0]
                if not np == numpy: l1loss = np.asnumpy(l1loss)
                sys.stdout.write('batch# {}, lrate {}, l1-loss {:.4}\n'.format(d+1,lrate*learningFactor,l1loss))
                sys.stdout.flush()

        #after training, either due to convergence or iteration limit
        t_elapsed =  time.time() - t_start
        m, s = divmod(t_elapsed, 60)
        h, m = divmod(m, 60)
        d, h = divmod(h, 24)
        timestring = '{}d {}h {}m {}s'.format(int(d), int(h), int(m), int(s))
        print('Training terminated after {}'.format(timestring))
        print('Setting network parameters to best encountered network state with {}% accuracy and a loss of {} from iteration {}.'.format(bestAccuracy*100, bestLoss, bestIter))
        self.modules = bestLayers
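The `randperm` docstring claims that drawing without replacement via numpy.random.choice is slow; the legacy RandomState implementation materializes a full length-N permutation for replace=False. A rough micro-benchmark of that claim (timings vary by NumPy version):

import timeit
import numpy

N, b = 10_000_000, 25

def unique_append():
    # the strategy used by randperm above
    I = numpy.arange(0)
    while I.size < b:
        I = numpy.unique(numpy.append(I, numpy.random.randint(0, N, [b - I.size])))
    return I

t_choice = timeit.timeit(lambda: numpy.random.choice(N, b, replace=False), number=50)
t_loop = timeit.timeit(unique_append, number=50)
print("choice: {:.3f}s  unique-append: {:.3f}s  (50 draws each)".format(t_choice, t_loop))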
Example #28
def to_cpu(x):
    if type(x) == np.ndarray:
        return x
    return np.asnumpy(x)
Example #29
def to_cpu(x):
    import numpy
    if type(x) == numpy.ndarray:
        return x
    return np.asnumpy(x)
Example #30
# transfer pixel values from [0 255] to [-1 1] to satisfy the expected input / training paradigm of the model
X =  X / 127.5 - 1.

#reshape the vector representations in X to match the requirements of the CNN input
X = np.reshape(X,[X.shape[0],28,28,1])
X = np.pad(X,((0,0),(2,2),(2,2),(0,0)), 'constant', constant_values = (-1.,))

# transform numeric class labels to vector indicator for uniformity. assume presence of all classes within the label set
I = Y[:,0].astype(int)
Y = np.zeros([X.shape[0],np.unique(Y).size])
Y[np.arange(Y.shape[0]),I] = 1

acc = np.mean(np.argmax(nn.forward(X), axis=1) == np.argmax(Y, axis=1))
if not np == numpy: # np=cupy
    acc = np.asnumpy(acc)
print('model test accuracy is: {:0.4f}'.format(acc))

#permute data order for demonstration. or not. your choice.
I = np.arange(X.shape[0])
#I = np.random.permutation(I)

#predict and perform LRP for the 10 first samples
for i in I[:10]:
    x = X[i:i+1,...]

    #forward pass and prediction
    ypred = nn.forward(x)
    print('True Class:     ', np.argmax(Y[i]))
    print('Predicted Class:', np.argmax(ypred),'\n')