def __init__(self, input_dim=(1, 28, 28),
             conv_param_1=None, conv_param_2=None, conv_param_3=None,
             conv_param_4=None, conv_param_5=None, conv_param_6=None,
             hidden_size=50, output_size=10):
    """Build a deep CNN: three (Conv-ReLU-Conv-ReLU-Pool) stages followed by
    Affine-ReLU-Dropout-Affine-Dropout, with SoftmaxWithLoss as the last layer.

    Args:
        input_dim: (channels, height, width) of one input sample (MNIST default).
        conv_param_1..6: per-conv-layer dicts with keys 'filter_num',
            'filter_size', 'pad', 'stride'. None selects the defaults below.
        hidden_size: width of the fully connected hidden layer.
        output_size: number of output classes.
    """
    # None sentinels replace the original mutable dict defaults, which were
    # shared across every call (classic mutable-default pitfall). Callers
    # that pass nothing get identical configurations.
    if conv_param_1 is None:
        conv_param_1 = {'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_2 is None:
        conv_param_2 = {'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_3 is None:
        conv_param_3 = {'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_4 is None:
        conv_param_4 = {'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1}
    if conv_param_5 is None:
        conv_param_5 = {'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_6 is None:
        conv_param_6 = {'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1}

    # Fan-in of each weighted layer; He initialization (sqrt(2/fan_in)) is
    # the recommended scale for ReLU activations.
    pre_node_nums = np.array(
        [1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
    weight_init_scale = np.sqrt(2.0 / pre_node_nums)

    # weights init
    self.params = {}
    pre_channel_num = input_dim[0]
    conv_params = [conv_param_1, conv_param_2, conv_param_3,
                   conv_param_4, conv_param_5, conv_param_6]
    for idx, conv_param in enumerate(conv_params):
        # Filter shape: (num_filters, in_channels, height, width).
        self.params['w'+str(idx+1)] = weight_init_scale[idx] * \
            np.random.randn(conv_param['filter_num'], pre_channel_num,
                            conv_param['filter_size'], conv_param['filter_size'])
        self.params['b'+str(idx+1)] = np.zeros(conv_param['filter_num'])
        pre_channel_num = conv_param['filter_num']
    # Fully connected layers: flattened 64x4x4 feature map -> hidden -> output.
    self.params['w7'] = weight_init_scale[6] * np.random.randn(64*4*4, hidden_size)
    self.params['b7'] = np.zeros(hidden_size)
    self.params['w8'] = weight_init_scale[7] * np.random.randn(hidden_size, output_size)
    self.params['b8'] = np.zeros(output_size)

    # gen layers
    self.layers = []
    self.layers.append(Convolution(self.params['w1'], self.params['b1'],
                                   conv_param_1['stride'], conv_param_1['pad']))
    self.layers.append(Relu())
    self.layers.append(Convolution(self.params['w2'], self.params['b2'],
                                   conv_param_2['stride'], conv_param_2['pad']))
    self.layers.append(Relu())
    self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
    self.layers.append(Convolution(self.params['w3'], self.params['b3'],
                                   conv_param_3['stride'], conv_param_3['pad']))
    self.layers.append(Relu())
    self.layers.append(Convolution(self.params['w4'], self.params['b4'],
                                   conv_param_4['stride'], conv_param_4['pad']))
    self.layers.append(Relu())
    self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
    self.layers.append(Convolution(self.params['w5'], self.params['b5'],
                                   conv_param_5['stride'], conv_param_5['pad']))
    self.layers.append(Relu())
    self.layers.append(Convolution(self.params['w6'], self.params['b6'],
                                   conv_param_6['stride'], conv_param_6['pad']))
    self.layers.append(Relu())
    self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
    self.layers.append(Affine(self.params['w7'], self.params['b7']))
    self.layers.append(Relu())
    self.layers.append(Dropout(0.5))
    self.layers.append(Affine(self.params['w8'], self.params['b8']))
    self.layers.append(Dropout(0.5))
    self.last_layer = SoftmaxWithLoss()
def infererence(args):
    """Run ShuffleNet inference on one image and print the top-10
    (probability, class-name) pairs.

    NOTE(review): the function name has a typo ('infererence') but is kept
    unchanged to preserve the external interface for existing callers.

    Args:
        args: namespace with `image` (input image path) and `idx_to_class`
            (path to a JSON file mapping class index strings to names).
    """
    groups = 8  # ShuffleNet group count used by the shuffle stages

    print('Loading image')
    image = Image.open(args.image)

    print('Preprocessing')
    transformer = get_transformer()
    input_data = preprocess(image, transformer)
    print('input_data ', input_data.shape)

    # First conv layer: weights exported from PyTorch ('module.' prefix).
    conv_w = np.load('./data/' + 'module.conv1.weight.npy')
    conv_b = np.load('./data/' + 'module.conv1.bias.npy')
    conv_layer = Convolution(conv_w, conv_b, stride=2, pad=1)
    out = conv_layer.forward(input_data)

    # 3x3 max pooling, stride 2, pad 1.
    maxpool_layer = Pooling(3, 3, 2, 1)
    out = maxpool_layer.forward(out)

    # Three shuffle stages; the repeat counts (3, 7, 3) follow the
    # ShuffleNet stage configuration.
    out = stage_shuffle(out, stage2_str, 3, groups)
    out = stage_shuffle(out, stage3_str, 7, groups)
    out = stage_shuffle(out, stage4_str, 3, groups)

    # Global average pooling over the remaining spatial extent. Distinct
    # names (feat_h/feat_w) avoid shadowing the fc weight variable below,
    # which the original code clobbered with a bare `w`.
    feat_h, feat_w = out.shape[-2:]
    avgpool_layer = AVGPooling(feat_h, feat_w, 1, 0)
    out = avgpool_layer.forward(out).reshape(1, -1)

    # Fully connected classifier; PyTorch stores fc weights as (out, in),
    # so transpose to the (in, out) layout Affine expects.
    fc_w = np.load('./data/' + 'module.fc.weight.npy').transpose(1, 0)
    fc_b = np.load('./data/' + 'module.fc.bias.npy')
    fc_layer = Affine(fc_w, fc_b)
    out = fc_layer.forward(out)

    softmax_layer = Softmax()
    out = softmax_layer.forward(out).reshape(-1)

    # Pair each of the 1000 class probabilities with its name, then print
    # the 10 most probable classes.
    with open(args.idx_to_class) as json_file:
        json_data = json.load(json_file)
    result = [(out[i], json_data[str(i)]) for i in range(1000)]
    result.sort(key=lambda item: item[0], reverse=True)
    for i in range(10):
        print(result[i])
def __init__(self, input_dim=(1, 28, 28),
             conv_param_1=None, conv_param_2=None, conv_param_3=None,
             conv_param_4=None, conv_param_5=None, conv_param_6=None,
             hidden_size=50, output_size=10):
    """Build a deep CNN: three (Conv-ReLU-Conv-ReLU-Pool) stages followed by
    Affine-ReLU-Dropout-Affine-Dropout and a SoftmaxWithLoss output layer.

    Args:
        input_dim: (channels, height, width) of one input sample (MNIST default).
        conv_param_1..6: conv layer configs ('filter_num', 'filter_size',
            'pad', 'stride'); None selects the defaults below.
        hidden_size: width of the fully connected hidden layer.
        output_size: number of output classes.
    """
    # None sentinels replace the original mutable dict defaults, which were
    # shared between calls (mutable-default pitfall); defaults are unchanged.
    if conv_param_1 is None:
        conv_param_1 = {'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_2 is None:
        conv_param_2 = {'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_3 is None:
        conv_param_3 = {'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_4 is None:
        conv_param_4 = {'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1}
    if conv_param_5 is None:
        conv_param_5 = {'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1}
    if conv_param_6 is None:
        conv_param_6 = {'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1}

    # ===== Weight initialization =====
    # Number of connections each neuron has to the previous layer
    # (TODO: compute automatically instead of hard-coding).
    pre_node_nums = np.array(
        [1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
    # He initialization: recommended scale when using ReLU activations.
    weight_init_scales = np.sqrt(2.0 / pre_node_nums)

    self.params = {}
    pre_channel_num = input_dim[0]
    conv_params = [conv_param_1, conv_param_2, conv_param_3,
                   conv_param_4, conv_param_5, conv_param_6]
    for idx, conv_param in enumerate(conv_params):
        # Filter shape: (num_filters, in_channels, height, width).
        self.params['W' + str(idx+1)] = weight_init_scales[idx] * \
            np.random.randn(conv_param['filter_num'], pre_channel_num,
                            conv_param['filter_size'], conv_param['filter_size'])
        self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
        pre_channel_num = conv_param['filter_num']
    # Fully connected layers: flattened 64x4x4 feature map -> hidden -> output.
    self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
    self.params['b7'] = np.zeros(hidden_size)
    self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
    self.params['b8'] = np.zeros(output_size)

    # ===== Layer construction =====
    self.layers = []
    self.layers.append(Convolution(self.params['W1'], self.params['b1'],
                                   conv_param_1['stride'], conv_param_1['pad']))
    self.layers.append(Relu())
    self.layers.append(Convolution(self.params['W2'], self.params['b2'],
                                   conv_param_2['stride'], conv_param_2['pad']))
    self.layers.append(Relu())
    self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
    self.layers.append(Convolution(self.params['W3'], self.params['b3'],
                                   conv_param_3['stride'], conv_param_3['pad']))
    self.layers.append(Relu())
    self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                                   conv_param_4['stride'], conv_param_4['pad']))
    self.layers.append(Relu())
    self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
    self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                                   conv_param_5['stride'], conv_param_5['pad']))
    self.layers.append(Relu())
    self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                                   conv_param_6['stride'], conv_param_6['pad']))
    self.layers.append(Relu())
    self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
    self.layers.append(Affine(self.params['W7'], self.params['b7']))
    self.layers.append(Relu())
    self.layers.append(Dropout(0.5))
    self.layers.append(Affine(self.params['W8'], self.params['b8']))
    self.layers.append(Dropout(0.5))
    self.last_layer = SoftmaxWithLoss()
def __init__(self, input_dim=(1, 28, 28), conv_param=None, hidden_size=100,
             output_size=10, weight_init_std=0.01, regularizer_lambda=0.1):
    """Build a simple CNN: Conv -> ReLU -> MaxPool -> Affine -> ReLU ->
    Affine, with SoftmaxWithLoss used as the final loss layer.

    Args:
        input_dim: (channels, height, width) of one input sample.
        conv_param: conv layer config ('filter_num', 'filter_size', 'pad',
            'stride'); None selects 30 filters of size 5x5, no pad, stride 1.
        hidden_size: width of the fully connected hidden layer.
        output_size: number of output classes.
        weight_init_std: std-dev of the Gaussian used to initialize weights.
        regularizer_lambda: regularization strength (stored for later use).
    """
    if conv_param is None:
        conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1}

    n_filters = conv_param['filter_num']
    k_size = conv_param['filter_size']
    padding = conv_param['pad']
    stride = conv_param['stride']

    # Spatial size of one conv feature map (input assumed square).
    in_size = input_dim[1]
    conv_out = int((in_size + 2 * padding - k_size) / stride + 1)
    # The 2x2/stride-2 max pool halves height and width; flattening the
    # pooled maps yields n_filters * (conv_out/2)^2 nodes feeding Affine1.
    pool_out = int(n_filters * (conv_out / 2) ** 2)

    self.regularizer_lambda = regularizer_lambda

    # Trainable parameters (pooling has none):
    #   W1/b1 conv filters + per-filter bias,
    #   W2/b2 hidden affine, W3/b3 output affine.
    self.params = {}
    self.params['W1'] = weight_init_std * np.random.randn(
        n_filters, input_dim[0], k_size, k_size)
    self.params['b1'] = np.zeros(n_filters)
    self.params['W2'] = weight_init_std * np.random.randn(pool_out, hidden_size)
    self.params['b2'] = np.zeros(hidden_size)
    self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
    self.params['b3'] = np.zeros(output_size)

    # Wire the layers in forward order.
    self.layers = OrderedDict()
    self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                       conv_param['stride'], conv_param['pad'])
    self.layers['ReLU1'] = ReLU()
    self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
    self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
    self.layers['ReLU2'] = ReLU()
    self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
    # Cross-entropy loss via softmax, used only during training.
    self.last_layer = SoftmaxWithLoss()
def __init__(self, input_dim=(1, 28, 28), conv_param=None,
             hidden_size=100, output_size=10, weight_init_std=0.01):
    """Build a simple CNN: Conv -> ReLU -> MaxPool -> Affine -> ReLU ->
    Affine with SoftmaxWithLoss as the last layer.

    Args:
        input_dim: (channels, height, width) of one input sample.
        conv_param: conv layer config ('filter_num', 'filter_size', 'pad',
            'stride'); None selects 30 filters of size 5x5, no pad, stride 1.
        hidden_size: width of the fully connected hidden layer.
        output_size: number of output classes.
        weight_init_std: std-dev of the Gaussian weight initialization.
    """
    # None sentinel replaces the original mutable dict default, which was a
    # single shared object across all calls (mutable-default pitfall).
    if conv_param is None:
        conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1}

    filter_num = conv_param['filter_num']
    filter_size = conv_param['filter_size']
    filter_pad = conv_param['pad']
    filter_stride = conv_param['stride']
    input_size = input_dim[1]
    # Conv output spatial size; the 2x2/stride-2 pool then halves it, and the
    # flattened pooled maps feed the first affine layer.
    conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
    pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

    # Weight initialization
    self.params = {'W1': weight_init_std * \
                   np.random.randn(filter_num, input_dim[0], filter_size, filter_size),
                   'b1': np.zeros(filter_num),
                   'W2': weight_init_std * \
                   np.random.randn(pool_output_size, hidden_size),
                   'b2': np.zeros(hidden_size),
                   'W3': weight_init_std * \
                   np.random.randn(hidden_size, output_size),
                   'b3': np.zeros(output_size)}

    # Layer construction, wired in forward order.
    self.layers = OrderedDict()
    self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                       conv_param['stride'], conv_param['pad'])
    self.layers['Relu1'] = ReLU()
    self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
    self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
    self.layers['Relu2'] = ReLU()
    self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
    self.last_layer = SoftmaxWithLoss()
def __init__(self, dim_in=(1, 28, 28), par=None,
             s_hidden=100, s_out=10, std_w_init=0.01):
    """Build a simple CNN: Conv -> ReLU -> MaxPool -> Affine -> ReLU ->
    Affine with SoftmaxWithLoss as the last layer.

    Args:
        dim_in: (channels, height, width) of one input sample.
        par: conv layer config ('num_filter', 'size_filter', 'pad',
            'stride'); None selects 30 filters of size 5x5, no pad, stride 1.
        s_hidden: width of the fully connected hidden layer.
        s_out: number of output classes.
        std_w_init: std-dev of the Gaussian weight initialization.
    """
    # None sentinel replaces the original mutable dict default, which was a
    # single shared object across all calls (mutable-default pitfall).
    if par is None:
        par = {'num_filter': 30, 'size_filter': 5, 'pad': 0, 'stride': 1}

    n_f = par['num_filter']
    s_f = par['size_filter']
    pad = par['pad']
    stride = par['stride']
    size_in = dim_in[1]
    # Conv output spatial size; the 2x2/stride-2 pool halves it, and the
    # flattened pooled maps feed the first affine layer.
    size_out_conv = int((size_in + 2 * pad - s_f) / stride) + 1
    size_out_pool = int(n_f * (size_out_conv / 2) ** 2)

    self.params = {}
    # Conv filters: (num_filters, in_channels, height, width) + per-filter bias.
    self.params['W1'] =\
        std_w_init * np.random.randn(n_f, dim_in[0], s_f, s_f)
    self.params['b1'] = np.zeros(n_f)
    self.params['W2'] = std_w_init * np.random.randn(size_out_pool, s_hidden)
    self.params['b2'] = np.zeros(s_hidden)
    self.params['W3'] = std_w_init * np.random.randn(s_hidden, s_out)
    self.params['b3'] = np.zeros(s_out)

    # Layer construction, wired in forward order (keys kept as-is for
    # compatibility with any code that indexes self.layers by name).
    self.layers = OrderedDict()
    self.layers['Conv'] = Convolution(self.params['W1'], self.params['b1'],
                                      stride, pad)
    self.layers['Relu1'] = Relu()
    self.layers['Pool'] = Pooling(2, 2, 2)
    self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
    self.layers['Relu'] = Relu()
    self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
    self.last_layer = SoftmaxWithLoss()
def __init__(self, input_dim=(1, 28, 28), conv_params=None,
             hidden_size=100, output_size=10, weight_init_std=0.01):
    """Initialize the CNN: build its layers and parameters.

    Args:
        input_dim: input data dimensions; (1, 28, 28) for MNIST.
        conv_params: values needed to create the Convolution layer's
            parameters (filter, bias): filter count (filter_num), filter
            size (filter_size = height = width), padding (pad), stride.
            None selects 30 filters of size 5x5, no pad, stride 1.
        hidden_size: number of neurons in the Affine hidden layer (W size).
        output_size: number of output elements; 10 for MNIST.
        weight_init_std: std-dev used when initializing weights randomly.
    """
    # None sentinel replaces the original mutable dict default (a single
    # dict object shared across every call).
    if conv_params is None:
        conv_params = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1}

    filter_num = conv_params['filter_num']
    filter_size = conv_params['filter_size']
    filter_pad = conv_params['pad']
    filter_stride = conv_params['stride']
    input_size = input_dim[1]
    conv_output_size = (input_size - filter_size + 2 * filter_pad) / \
        filter_stride + 1
    # The 2x2/stride-2 pool halves each spatial dimension; flattened size
    # feeds the first affine layer.
    pool_output_size = int(filter_num * (conv_output_size / 2) *
                           (conv_output_size / 2))

    # Parameters needed by the CNN layers.
    self.params = dict()
    self.params['W1'] = weight_init_std * np.random.randn(
        filter_num, input_dim[0], filter_size, filter_size)
    self.params['b1'] = np.zeros(filter_num)
    self.params['W2'] = weight_init_std * np.random.randn(
        pool_output_size, hidden_size)
    self.params['b2'] = np.zeros(hidden_size)
    # BUG FIX: this was stored under key 'W' while Affine2 below reads
    # 'W3', which raised KeyError at construction time.
    self.params['W3'] = weight_init_std * np.random.randn(
        hidden_size, output_size)
    self.params['b3'] = np.zeros(output_size)

    # Create and connect the CNN layers in forward order (layer keys kept
    # as in the original, including the 'ReLu1' spelling, for any code
    # that indexes self.layers by name).
    self.layers = OrderedDict()
    self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                       conv_params['stride'], conv_params['pad'])
    self.layers['ReLu1'] = Relu()
    self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
    self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
    self.layers['Relu2'] = Relu()
    self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
    self.last_layer = SoftmaxWithLoss()
def __init__(self, input_dim=(1, 28, 28), conv_param=None,
             hidden_size=100, output_size=10, weight_init_std=0.01):
    """Initialize the CNN: build its parameters and connect its layers.

    Args:
        input_dim: (channels, height, width) of one input sample.
        conv_param: conv layer config ('filter_num', 'filter_size', 'pad',
            'stride'); None selects 30 filters of size 5x5, no pad, stride 1.
        hidden_size: width of the fully connected hidden layer.
        output_size: number of output classes.
        weight_init_std: std-dev of the Gaussian weight initialization.
    """
    # None sentinel replaces the original mutable dict default (a single
    # dict object shared across every call).
    if conv_param is None:
        conv_param = {'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1}

    filter_num = conv_param['filter_num']
    filter_size = conv_param['filter_size']
    filter_pad = conv_param['pad']
    filter_stride = conv_param['stride']
    input_size = input_dim[1]
    conv_output_size = (input_size - filter_size + 2 * filter_pad) / filter_stride + 1
    # The 2x2/stride-2 pool halves each spatial dimension; flattened size
    # feeds the first affine layer.
    pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))

    # Parameters needed by the CNN layers.
    self.params = dict()
    self.params['W1'] = weight_init_std * \
        np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
    self.params['b1'] = np.zeros(filter_num)
    self.params['W2'] = weight_init_std * \
        np.random.randn(pool_output_size, hidden_size)
    self.params['b2'] = np.zeros(hidden_size)
    self.params['W3'] = weight_init_std * \
        np.random.randn(hidden_size, output_size)
    self.params['b3'] = np.zeros(output_size)

    # Create and connect the CNN layers in forward order.
    self.layers = OrderedDict()
    self.layers['Conv1'] = Convolution(self.params['W1'], self.params['b1'],
                                       conv_param['stride'], conv_param['pad'])
    self.layers['Relu1'] = Relu()
    self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
    # BUG FIX: the hidden Affine layer was missing entirely, so W2/b2 were
    # initialized but never used and the pooled output was fed straight
    # into Affine2 with mismatched dimensions.
    self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
    self.layers['Relu2'] = Relu()
    self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
    self.last_layer = SoftmaxWithLoss()
def __init__(self, input_dim=(1, 28, 28), conv_param_1=None,
             conv_param_2=None, conv_param_3=None, conv_param_4=None,
             conv_param_5=None, conv_param_6=None, hidden_size=50,
             output_size=10):
    """Build a deep CNN of three (Conv-ReLU-Conv-ReLU-Pool) stages followed
    by Affine-ReLU-Dropout-Affine-Dropout and a SoftmaxWithLoss layer.

    Args:
        input_dim: (channels, height, width) of one input sample.
        conv_param_1..6: conv layer configs ('filter_num', 'filter_size',
            'pad', 'stride'); None selects the defaults below.
        hidden_size: width of the fully connected hidden layer.
        output_size: number of output classes.
    """
    # Conv layer 1: input 1x28x28, output 16x28x28.
    if conv_param_1 is None:
        conv_param_1 = {
            'filter_num': 16,
            'filter_size': 3,
            'pad': 1,
            'stride': 1
        }
    # Conv layer 2: input 16x28x28, output 16x28x28.
    if conv_param_2 is None:
        conv_param_2 = {
            'filter_num': 16,
            'filter_size': 3,
            'pad': 1,
            'stride': 1
        }
    # Conv layer 2 is followed by a 2x2/stride-2 max pool (halves H and W).
    # Conv layer 3: input 16x14x14, output 32x14x14.
    if conv_param_3 is None:
        conv_param_3 = {
            'filter_num': 32,
            'filter_size': 3,
            'pad': 1,
            'stride': 1
        }
    # Conv layer 4: input 32x14x14, but pad=2 makes the output 32x16x16.
    if conv_param_4 is None:
        conv_param_4 = {
            'filter_num': 32,
            'filter_size': 3,
            'pad': 2,
            'stride': 1
        }
    # Conv layer 4 is followed by a 2x2/stride-2 max pool (halves H and W).
    # Conv layer 5: input 32x8x8, output 64x8x8.
    if conv_param_5 is None:
        conv_param_5 = {
            'filter_num': 64,
            'filter_size': 3,
            'pad': 1,
            'stride': 1
        }
    # Conv layer 6: input 64x8x8, output 64x8x8.
    if conv_param_6 is None:
        conv_param_6 = {
            'filter_num': 64,
            'filter_size': 3,
            'pad': 1,
            'stride': 1
        }
    # Each conv-layer node connects to filter_size x filter_size nodes of
    # the previous layer (times the channel count when there are multiple
    # channels). All conv layers here use 3x3 filters.
    # Per-layer output shapes:
    #   conv1: 16 28 28
    #   conv2 | pool1: 16 28 28 | 16 14 14
    #   conv3: 32 14 14
    #   conv4 | pool2: 32 16 16 | 32 8 8
    #   conv5: 64 8 8
    #   conv6 | pool3: 64 8 8 | 64 4 4
    pre_node_nums = np.array([
        1 * 3 * 3,    # conv1: previous layer (input) has 1 channel
        16 * 3 * 3,   # conv2: previous layer (conv1) has 16 channels
        16 * 3 * 3,   # conv3: previous layer (conv2) has 16 channels
        32 * 3 * 3,   # conv4: previous layer (conv3) has 32 channels
        32 * 3 * 3,   # conv5: previous layer (conv4) has 32 channels
        64 * 3 * 3,   # conv6: previous layer (conv5) has 64 channels
        # Hidden layer: the pooled maps are flattened, so its fan-in is the
        # total number of pool-3 output nodes.
        64 * 4 * 4,
        # Output layer: fully connected to the hidden layer.
        hidden_size
    ])
    # Weight-init std-devs: He initialization, recommended for ReLU.
    weight_init_scales = np.sqrt(2.0 / pre_node_nums)

    # Initialize weights and biases.
    self.params = {}
    pre_channel_num = input_dim[0]  # channel count of the previous layer
    for idx, conv_param in enumerate([
            conv_param_1, conv_param_2, conv_param_3, conv_param_4,
            conv_param_5, conv_param_6
    ]):
        # Conv filter shape: (num_filters, channels, height, width).
        self.params['W'+str(idx+1)] = weight_init_scales[idx] *\
            np.random.randn(
                conv_param['filter_num'], pre_channel_num,
                conv_param['filter_size'], conv_param['filter_size'])
        self.params['b' + str(idx + 1)] = np.zeros(
            conv_param['filter_num'])
        pre_channel_num = conv_param['filter_num']  # update channel count
    self.params['W7'] = weight_init_scales[6] * np.random.randn(
        64 * 4 * 4, hidden_size)
    self.params['b7'] = np.zeros(hidden_size)
    self.params['W8'] = weight_init_scales[7] * np.random.randn(
        hidden_size, output_size)
    self.params['b8'] = np.zeros(output_size)

    # Network construction. The book does not use the OrderedDict approach
    # here, but it is convenient, so this version keeps it:
    # Conv1->ReLU1->Conv2->ReLU2->Pool1->
    # Conv3->ReLU3->Conv4->ReLU4->Pool2->
    # Conv5->ReLU5->Conv6->ReLU6->Pool3->
    # Affine1(hidden)->ReLU7->Dropout1->
    # Affine2(output)->Dropout2 -------> SoftmaxWithLoss
    self.layers = OrderedDict()
    self.layers['Conv1'] = Convolution(self.params['W1'],
                                       self.params['b1'],
                                       stride=conv_param_1['stride'],
                                       pad=conv_param_1['pad'])
    self.layers['ReLU1'] = ReLU()
    self.layers['Conv2'] = Convolution(self.params['W2'],
                                       self.params['b2'],
                                       stride=conv_param_2['stride'],
                                       pad=conv_param_2['pad'])
    self.layers['ReLU2'] = ReLU()
    self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2, pad=0)
    self.layers['Conv3'] = Convolution(self.params['W3'],
                                       self.params['b3'],
                                       stride=conv_param_3['stride'],
                                       pad=conv_param_3['pad'])
    self.layers['ReLU3'] = ReLU()
    self.layers['Conv4'] = Convolution(self.params['W4'],
                                       self.params['b4'],
                                       stride=conv_param_4['stride'],
                                       pad=conv_param_4['pad'])
    self.layers['ReLU4'] = ReLU()
    self.layers['Pool2'] = Pooling(pool_h=2, pool_w=2, stride=2, pad=0)
    self.layers['Conv5'] = Convolution(self.params['W5'],
                                       self.params['b5'],
                                       stride=conv_param_5['stride'],
                                       pad=conv_param_5['pad'])
    self.layers['ReLU5'] = ReLU()
    self.layers['Conv6'] = Convolution(self.params['W6'],
                                       self.params['b6'],
                                       stride=conv_param_6['stride'],
                                       pad=conv_param_6['pad'])
    self.layers['ReLU6'] = ReLU()
    self.layers['Pool3'] = Pooling(pool_h=2, pool_w=2, stride=2, pad=0)
    self.layers['Affine1'] = Affine(self.params['W7'], self.params['b7'])
    self.layers['ReLU7'] = ReLU()
    self.layers['Dropout1'] = Dropout(dropout_ratio=0.5)
    self.layers['Affine2'] = Affine(self.params['W8'], self.params['b8'])
    self.layers['Dropout2'] = Dropout(dropout_ratio=0.5)
    self.last_layer = SoftmaxWithLoss()