def inference(args):
    groups = 8  # group count for the grouped 1x1 convolutions (ShuffleNet g=8)

    print('Loading image')
    image = Image.open(args.image)
    print('Preprocessing')
    transformer = get_transformer()
    input_data = preprocess(image, transformer)

    print('input_data', input_data.shape)
    #conv layer
    w = np.load('./data/' + 'module.conv1.weight.npy')
    b = np.load('./data/' + 'module.conv1.bias.npy')
    conv_layer = Convolution(w, b, stride=2, pad=1)
    out = conv_layer.forward(input_data)
    #savetxt('./dump/' + 'conv1_out.txt', out)

    #max pooling
    maxpool_layer = Pooling(3, 3, 2, 1)
    out = maxpool_layer.forward(out)
    #savetxt('./dump/' + 'maxpool_out.txt', out)

    # ShuffleNet stages 2-4: 3, 7 and 3 repeated units respectively
    out = stage_shuffle(out, stage2_str, 3, groups)
    #savetxt('./dump/' + 'stage2.txt', out)
    out = stage_shuffle(out, stage3_str, 7, groups)
    #savetxt('./dump/' + 'stage3.txt', out)
    out = stage_shuffle(out, stage4_str, 3, groups)
    #savetxt('./dump/' + 'stage4.txt', out)

    # global average pooling over the full remaining spatial extent
    h, w = out.shape[-2:]
    avgpool_layer = AVGPooling(h, w, 1, 0)
    out = avgpool_layer.forward(out).reshape(1, -1)

    w = np.load('./data/' + 'module.fc.weight.npy')
    b = np.load('./data/' + 'module.fc.bias.npy')
    w = w.transpose(1, 0)  # (out_features, in_features) -> (in_features, out_features)

    fc_layer = Affine(w, b)
    out = fc_layer.forward(out)

    softmax_layer = Softmax()
    out = softmax_layer.forward(out).reshape(-1)

    # pair each class probability with its human-readable label
    with open(args.idx_to_class) as json_file:
        json_data = json.load(json_file)
    result = []
    for i in range(len(out)):  # 1000 ImageNet classes
        result.append((out[i], json_data[str(i)]))

    # print the top-10 predictions
    result = sorted(result, key=lambda item: item[0], reverse=True)
    for i in range(10):
        print(result[i])
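
# `channel_shuffle` is called inside `stage_shuffle` below but is not defined
# in this snippet. A minimal NumPy sketch of the standard ShuffleNet shuffle,
# assuming NCHW input whose channel count is divisible by `groups`:
def channel_shuffle(x, groups):
    n, c, h, w = x.shape
    # split the channels into groups, swap the group and per-group axes,
    # then flatten back; this interleaves channels across the groups
    x = x.reshape(n, groups, c // groups, h, w)
    x = x.transpose(0, 2, 1, 3, 4)
    return x.reshape(n, c, h, w)
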
def stage_shuffle(input_data, stage, repeat_num, groups):
    # Downsampling unit first: its shortcut branch is a stride-2 3x3 average
    # pool that is concatenated with the main branch at the end of the unit.
    avgpool_layer = AVGPooling(3, 3, 2, 1)
    residual = avgpool_layer.forward(input_data)
    #savetxt('./dump/' + 'avg_pool.txt', residual)

    w = np.load(stage + '0.g_conv_1x1_compress.conv1x1.weight.npy')
    b = np.load(stage + '0.g_conv_1x1_compress.conv1x1.bias.npy')

    if 'Stage2' in stage:
        # the first 1x1 conv of Stage2 is not grouped because its input
        # has relatively few channels
        conv_layer = Convolution(w, b, stride=1, pad=0)
    else:
        conv_layer = GroupConvolution(w, b, stride=1, pad=0, groups=groups)
    out = conv_layer.forward(input_data)
    out_N, out_C, out_H, out_W = out.shape

    gamma = np.load(stage +
                    '0.g_conv_1x1_compress.batch_norm.weight.npy').reshape(
                        (-1, 1))
    beta = np.load(stage +
                   '0.g_conv_1x1_compress.batch_norm.bias.npy').reshape(
                       (-1, 1))
    mean = np.load(
        stage + '0.g_conv_1x1_compress.batch_norm.running_mean.npy').reshape(
            (-1, 1))
    var = np.load(stage +
                  '0.g_conv_1x1_compress.batch_norm.running_var.npy').reshape(
                      (-1, 1))
    bn_layer = BatchNormalization(gamma,
                                  beta,
                                  running_mean=mean,
                                  running_var=var)
    # per-channel batch norm on a (C, H*W) view (assumes batch size 1)
    out = bn_layer.forward(out.reshape(out_C, -1), train_flg=False)
    relu_layer = Relu()
    out = relu_layer.forward(out).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + '1x1_comp.txt', out)

    out = channel_shuffle(out, groups)
    #savetxt('./dump/' + 'channel_shuffle.txt', out)

    # reorder the weight axes into the layout DWConvolution expects
    w = np.load(stage + '0.depthwise_conv3x3.weight.npy').transpose(1, 0, 2, 3)
    b = np.load(stage + '0.depthwise_conv3x3.bias.npy')
    dwconv_layer = DWConvolution(w, b, stride=2, pad=1)
    out = dwconv_layer.forward(out)
    #savetxt('./dump/' + 'dwconv.txt', out)

    gamma = np.load(stage + '0.bn_after_depthwise.weight.npy').reshape((-1, 1))
    beta = np.load(stage + '0.bn_after_depthwise.bias.npy').reshape((-1, 1))
    mean = np.load(stage + '0.bn_after_depthwise.running_mean.npy').reshape(
        (-1, 1))
    var = np.load(stage + '0.bn_after_depthwise.running_var.npy').reshape(
        (-1, 1))
    bn_layer = BatchNormalization(gamma,
                                  beta,
                                  running_mean=mean,
                                  running_var=var)
    out_N, out_C, out_H, out_W = out.shape
    out = bn_layer.forward(out.reshape(out_C, -1),
                           train_flg=False).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + 'after_bn.txt', out)

    w = np.load(stage + '0.g_conv_1x1_expand.conv1x1.weight.npy')
    b = np.load(stage + '0.g_conv_1x1_expand.conv1x1.bias.npy')
    groupconv_layer = GroupConvolution(w, b, stride=1, pad=0, groups=groups)
    out = groupconv_layer.forward(out)

    gamma = np.load(stage +
                    '0.g_conv_1x1_expand.batch_norm.weight.npy').reshape(
                        (-1, 1))
    beta = np.load(stage + '0.g_conv_1x1_expand.batch_norm.bias.npy').reshape(
        (-1, 1))
    mean = np.load(stage +
                   '0.g_conv_1x1_expand.batch_norm.running_mean.npy').reshape(
                       (-1, 1))
    var = np.load(stage +
                  '0.g_conv_1x1_expand.batch_norm.running_var.npy').reshape(
                      (-1, 1))
    bn_layer = BatchNormalization(gamma,
                                  beta,
                                  running_mean=mean,
                                  running_var=var)
    out_N, out_C, out_H, out_W = out.shape
    out = bn_layer.forward(out.reshape(out_C, -1),
                           train_flg=False).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + 'gconv.txt', out)

    out = np.concatenate((residual, out), 1)  # channel-wise concat (downsampling unit)
    #savetxt('./dump/' + 'combine.txt', out)
    relu_layer = Relu()
    out = relu_layer.forward(out)  # elementwise ReLU; shape is unchanged
    #savetxt('./dump/' + 'stage2.txt', out)

    # repeat_num stride-1 units with identity shortcuts and elementwise adds
    for i in range(1, repeat_num + 1):
        residual = out
        w = np.load(stage + str(i) + '.g_conv_1x1_compress.conv1x1.weight.npy')
        b = np.load(stage + str(i) + '.g_conv_1x1_compress.conv1x1.bias.npy')
        groupconv_layer = GroupConvolution(w,
                                           b,
                                           stride=1,
                                           pad=0,
                                           groups=groups)
        out = groupconv_layer.forward(out)
        out_N, out_C, out_H, out_W = out.shape

        gamma = np.load(stage + str(i) +
                        '.g_conv_1x1_compress.batch_norm.weight.npy').reshape(
                            (-1, 1))
        beta = np.load(stage + str(i) +
                       '.g_conv_1x1_compress.batch_norm.bias.npy').reshape(
                           (-1, 1))
        mean = np.load(
            stage + str(i) +
            '.g_conv_1x1_compress.batch_norm.running_mean.npy').reshape(
                (-1, 1))
        var = np.load(
            stage + str(i) +
            '.g_conv_1x1_compress.batch_norm.running_var.npy').reshape((-1, 1))
        bn_layer = BatchNormalization(gamma,
                                      beta,
                                      running_mean=mean,
                                      running_var=var)
        out = bn_layer.forward(out.reshape(out_C, -1), train_flg=False)
        relu_layer = Relu()
        out = relu_layer.forward(out).reshape(out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + str(i) + '_1x1_comp.txt', out)

        out = channel_shuffle(out, groups)
        #savetxt('./dump/' + 'channel_shuffle.txt', out)

        w = np.load(stage + str(i) +
                    '.depthwise_conv3x3.weight.npy').transpose(1, 0, 2, 3)
        b = np.load(stage + str(i) + '.depthwise_conv3x3.bias.npy')
        dwconv_layer = DWConvolution(w, b, stride=1, pad=1)
        out = dwconv_layer.forward(out)
        #savetxt('./dump/' + 'dwconv.txt', out)

        gamma = np.load(stage + str(i) +
                        '.bn_after_depthwise.weight.npy').reshape((-1, 1))
        beta = np.load(stage + str(i) +
                       '.bn_after_depthwise.bias.npy').reshape((-1, 1))
        mean = np.load(stage + str(i) +
                       '.bn_after_depthwise.running_mean.npy').reshape((-1, 1))
        var = np.load(stage + str(i) +
                      '.bn_after_depthwise.running_var.npy').reshape((-1, 1))
        bn_layer = BatchNormalization(gamma,
                                      beta,
                                      running_mean=mean,
                                      running_var=var)
        out_N, out_C, out_H, out_W = out.shape
        out = bn_layer.forward(out.reshape(out_C, -1),
                               train_flg=False).reshape(
                                   out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + 'after_bn.txt', out)

        w = np.load(stage + str(i) + '.g_conv_1x1_expand.conv1x1.weight.npy')
        b = np.load(stage + str(i) + '.g_conv_1x1_expand.conv1x1.bias.npy')
        groupconv_layer = GroupConvolution(w,
                                           b,
                                           stride=1,
                                           pad=0,
                                           groups=groups)
        out = groupconv_layer.forward(out)

        gamma = np.load(stage + str(i) +
                        '.g_conv_1x1_expand.batch_norm.weight.npy').reshape(
                            (-1, 1))
        beta = np.load(stage + str(i) +
                       '.g_conv_1x1_expand.batch_norm.bias.npy').reshape(
                           (-1, 1))
        mean = np.load(
            stage + str(i) +
            '.g_conv_1x1_expand.batch_norm.running_mean.npy').reshape((-1, 1))
        var = np.load(stage + str(i) +
                      '.g_conv_1x1_expand.batch_norm.running_var.npy').reshape(
                          (-1, 1))
        bn_layer = BatchNormalization(gamma,
                                      beta,
                                      running_mean=mean,
                                      running_var=var)
        out_N, out_C, out_H, out_W = out.shape
        out = bn_layer.forward(out.reshape(out_C, -1),
                               train_flg=False).reshape(
                                   out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + 'gconv.txt', out)

        out = np.add(residual, out)  # elementwise residual add (stride-1 unit)
        #savetxt('./dump/' + str(i) + '_combine.txt', out)
        relu_layer = Relu()
        out = relu_layer.forward(out)  # elementwise ReLU; shape is unchanged
        #savetxt('./dump/' + str(i) + '_stage.txt', out)
    return out
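
# Hypothetical CLI wiring, included only as a usage sketch: the function body
# above reads `args.image` and `args.idx_to_class`, so those argument names
# come from the source; the defaults and description are assumptions.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='NumPy ShuffleNet inference')
    parser.add_argument('--image', required=True)  # path to the input image
    parser.add_argument('--idx_to_class', default='idx_to_class.json')  # assumed default
    inference(parser.parse_args())
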
network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={'filter_num': 30, 'filter_size': 5,
                                    'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)

# weights after training
network.load_params("params.pkl")

filter_show(network.params['W1'], 16)

img = imread('../dataset/lena_gray.png')
img = img.reshape(1, 1, *img.shape)

fig = plt.figure()

w_idx = 1

for i in range(16):
    w = network.params['W1'][i]
    b = 0  # network.params['b1'][i]

    w = w.reshape(1, *w.shape)
    #b = b.reshape(1, *b.shape)
    conv_layer = Convolution(w, b) 
    out = conv_layer.forward(img)
    out = out.reshape(out.shape[2], out.shape[3])
    
    ax = fig.add_subplot(4, 4, i+1, xticks=[], yticks=[])
    ax.imshow(out, cmap=plt.cm.gray_r, interpolation='nearest')

plt.show()
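
# `filter_show` (called above) is not defined in this snippet. A minimal
# sketch, assuming `filters` has shape (filter_num, C, FH, FW), that numpy
# and matplotlib.pyplot are imported as np/plt like elsewhere in this file,
# and that the first channel of each filter is drawn in a 4-column grid:
def filter_show(filters, filter_num, nx=4):
    ny = int(np.ceil(filter_num / nx))
    fig = plt.figure()
    for i in range(filter_num):
        ax = fig.add_subplot(ny, nx, i + 1, xticks=[], yticks=[])
        ax.imshow(filters[i, 0], cmap=plt.cm.gray_r, interpolation='nearest')
    plt.show()
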
    # load the parameters saved in the pickle file into the cnn's fields
    cnn.load_params('cnn_params.pkl')
    after_filters = cnn.params['W1']
    # plot the parameters as updated by training
    show_filters(after_filters, 16, 4)

    # apply the trained filters to an actual image file
    lena = plt.imread('lena_gray.png')  # imread loads PNG directly as an ndarray
    # JPEG files must be opened with PIL
    print(lena.shape)  # (256, 256): the image is now a 2-D numpy array

    # The image data goes to the Convolution layer's forward() method, which
    # only accepts 4-D input, so the 2-D Lena array is reshaped to 4-D.
    lena = lena.reshape(1, 1, *lena.shape)  # *lena.shape == (256, 256)
    for i in range(16):  # iterate over the 16 filters
        w = cnn.params['W1'][i]  # trained filter w
        # b = cnn.params['b1'][i]  # trained bias
        b = 0  # bias not used so the image does not get distorted
        w = w.reshape(1, *w.shape)  # 3-D -> 4-D
        conv = Convolution(w, b)  # create a Convolution layer
        # the filter is in its trained state (a small 5x5 filter)
        out = conv.forward(lena)  # apply the filter to the image
        out = out.reshape(out.shape[2], out.shape[3])  # 4-D -> 2-D for pyplot
        # draw on a subplot
        plt.subplot(4, 4, i + 1, xticks=[], yticks=[])
        plt.imshow(out, cmap='gray')
    plt.show()
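
# The comments above note that plt.imread only handles PNG directly and that
# JPEG needs another package. A small sketch using Pillow (the file name is
# illustrative, not from the source):
from PIL import Image
import numpy as np

jpg = np.array(Image.open('photo.jpg').convert('L'), dtype=np.float32) / 255.0
print(jpg.shape)  # 2-D grayscale array, ready for the same reshape as above
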
Example #6
    show_filters(before_filters, num_filters=16, ncols=4)

    # parameters after training
    cnn.load_params('cnn_params.pkl')
    after_filters = cnn.params['W1']

    # plot the updated parameters after training
    show_filters(after_filters, 16, 4)

    # apply the updated parameters to an actual image file
    # (plt.imread opens the image as a numpy array, but only for PNG;
    # JPEG requires an external package)
    lena = plt.imread('lena_gray.png')
    print('lena shape =', lena.shape)  # (256, 256) ndarray

    # convert the 2-D array to 4-D so it can be passed to the Convolution
    # layer's forward() method
    lena = lena.reshape(1, 1, *lena.shape)  # * unpacks the tuple: (256, 256)
    print('lena shape =', lena.shape)
    for i in range(16):  # iterate over the 16 filters

        w = cnn.params['W1'][i]  # updated filter
        b = 0  # bias not used
        w = w.reshape(1, *w.shape)  # 3-D -> 4-D
        conv = Convolution(w, b)  # create a Convolution layer
        out = conv.forward(lena)  # apply the filter to the image
        # convert 4-D to 2-D for pyplot
        out = out.reshape(out.shape[2], out.shape[3])
        plt.subplot(4, 4, i + 1, xticks=[], yticks=[])
        plt.imshow(out, cmap='gray')
    plt.show()