def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):
        # fan-in of each layer: input channels x kernel height x kernel width
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scale = np.sqrt(2.0 / pre_node_nums)  # He initialization, recommended for ReLU

        # weight initialization
        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['w'+str(idx+1)] = weight_init_scale[idx] *\
                    np.random.randn(
                        conv_param['filter_num'],
                        pre_channel_num, conv_param['filter_size'],
                        conv_param['filter_size'])
            self.params['b'+str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['w7'] = weight_init_scale[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['w8'] = weight_init_scale[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # layer construction
        self.layers = []
        self.layers.append(Convolution(self.params['w1'], self.params['b1'], conv_param_1['stride'],
                                       conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['w2'], self.params['b2'], conv_param_2['stride'],
                                       conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['w3'], self.params['b3'], conv_param_3['stride'],
                                       conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['w4'], self.params['b4'], conv_param_4['stride'],
                                       conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['w5'], self.params['b5'], conv_param_5['stride'],
                                       conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['w6'], self.params['b6'], conv_param_6['stride'],
                                       conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['w7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['w8'], self.params['b8']))
        self.layers.append(Dropout(0.5))
        self.last_layer = SoftmaxWithLoss()
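
The hard-coded 64*4*4 fan-in of w7 follows from tracing the 28x28 input through the stack above: each 3x3/pad-1 convolution preserves the spatial size, conv_param_4's pad=2 grows 14 to 16, and the three 2x2/stride-2 pools halve it, ending at 64 channels of 4x4. A minimal sketch of that bookkeeping (layer geometry copied from the constructor above):

    # Derive the flattened size 64*4*4 that w7 expects.
    size, channels = 28, 1
    conv_params = [(16, 3, 1, 1), (16, 3, 1, 1), (32, 3, 1, 1),
                   (32, 3, 2, 1), (64, 3, 1, 1), (64, 3, 1, 1)]  # (num, size, pad, stride)
    for i, (num, fsize, pad, stride) in enumerate(conv_params, start=1):
        size = (size + 2 * pad - fsize) // stride + 1  # convolution output size
        channels = num
        if i % 2 == 0:                                 # a 2x2/stride-2 pool follows every second conv
            size //= 2
    print(channels * size * size)                      # -> 1024 == 64*4*4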
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_2={'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_3={'filter_num':32, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_4={'filter_num':32, 'filter_size':3, 'pad':2, 'stride':1},
                 conv_param_5={'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_6={'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},
                 hidden_size=50, output_size=10):
        # Weight initialization ===========
        # Number of connections each neuron has to the neurons of the previous layer
        # (TODO: compute automatically -- see the sketch after this snippet)
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial scale when using ReLU (He initialization)
        
        self.params = {} 
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3, conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(conv_param['filter_num'], pre_channel_num, conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # Layer construction ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'], 
                           conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'], 
                           conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'], 
                           conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                           conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                           conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                           conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))
        
        self.last_layer = SoftmaxWithLoss()
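
The TODO above asks for pre_node_nums to be computed automatically. A minimal sketch under the same architecture (each conv layer's fan-in is the previous channel count times filter_size squared; the two trailing Affine fan-ins stay as hard-coded in the constructor):

    # Build pre_node_nums from the conv params instead of hard-coding it.
    conv_params = [conv_param_1, conv_param_2, conv_param_3,
                   conv_param_4, conv_param_5, conv_param_6]
    pre_node_nums = []
    pre_channel_num = input_dim[0]
    for p in conv_params:
        pre_node_nums.append(pre_channel_num * p['filter_size'] ** 2)
        pre_channel_num = p['filter_num']
    pre_node_nums += [64*4*4, hidden_size]  # Affine fan-ins: flattened pool output, hidden layer
    pre_node_nums = np.array(pre_node_nums)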
    def __init__(self,
                 input_size,
                 hidden_size_list,
                 output_size,
                 activation="relu",
                 weight_init_std="relu",
                 weight_decay_lambda=0,
                 use_dropout=False,
                 dropout_ratio=0.5,
                 use_batchnorm=False):
        """
        :param input_size: 输入的大小
        :param hidden_size_list: 隐藏层的神经元数量列表
        :param output_size: 输出的大小
        :param activation: "relu" or "sigmoid"
        :param weight_init_std: 指定权重的标准差,
        指定"relu" 或者 "he" 是定为"He"的初始值
        指定"sigmoid" 或者 "xavier" 是定为"Xauver"的初始值
        :param weight_decay_lambda: Weight Decay(L2范数)的强度
        :param use_dropout: 是否使用Dropout
        :param dropout_ratio: Dropout比例
        :param use_batchnorm: 是否只用Batch Normalization
        """
        self.input_size = input_size
        self.output_size = output_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.use_dropout = use_dropout
        self.weight_decay_lambda = weight_decay_lambda
        self.use_batchnorm = use_batchnorm
        self.params = {}

        # Initialize the weights
        self.__init_weight(weight_init_std)

        # Create the layers
        activation_layer = {"sigmoid": Sigmoid, "relu": ReLU}
        self.layers = OrderedDict()
        for idx in range(1, self.hidden_layer_num + 1):
            self.layers["Affine" + str(idx)] = Affine(
                self.params["W" + str(idx)], self.params["b" + str(idx)])
            if self.use_batchnorm:
                self.params["gamma" + str(idx)] = np.ones(
                    hidden_size_list[idx - 1])
                self.params["beta" + str(idx)] = np.zeros(
                    hidden_size_list[idx - 1])
                self.layers['BatchNorm' + str(idx)] = BatchNormalization(
                    self.params['gamma' + str(idx)],
                    self.params['beta' + str(idx)])

            self.layers["Activation_function" +
                        str(idx)] = activation_layer[activation]()

            if self.use_dropout:
                self.layers["Dropout" + str(idx)] = Dropout(dropout_ratio)
        idx = self.hidden_layer_num + 1
        self.layers["Affine" + str(idx)] = Affine(self.params["W" + str(idx)],
                                                  self.params["b" + str(idx)])
        self.last_layer = SoftmaxWithLoss()
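
Both fully connected constructors on this page delegate to a private __init_weight that the excerpt does not show. A minimal sketch consistent with the docstring's He/Xavier options (the body in the source projects may differ in detail):

    def __init_weight(self, weight_init_std):
        # Layer sizes in order: input, hidden layers, output.
        all_size_list = [self.input_size] + self.hidden_size_list + [self.output_size]
        for idx in range(1, len(all_size_list)):
            scale = weight_init_std
            if str(weight_init_std).lower() in ('relu', 'he'):
                scale = np.sqrt(2.0 / all_size_list[idx - 1])  # He initial values for ReLU
            elif str(weight_init_std).lower() in ('sigmoid', 'xavier'):
                scale = np.sqrt(1.0 / all_size_list[idx - 1])  # Xavier initial values for sigmoid
            self.params['W' + str(idx)] = scale * np.random.randn(
                all_size_list[idx - 1], all_size_list[idx])
            self.params['b' + str(idx)] = np.zeros(all_size_list[idx])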
    def __init__(
        self,
        input_size,
        hidden_size_list,
        output_size,
        activation="relu",
        weight_init_std="relu",
        weight_decay_lambda=0,
        use_dropout=False,
        dropout_ratio=0.5,
        use_batchnorm=False,
    ):
        self.input_size = input_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.output_size = output_size
        self.weight_decay_lambda = weight_decay_lambda
        self.use_dropout = use_dropout
        self.use_batchnorm = use_batchnorm
        self.params = {}

        # Weight initialization
        self.__init_weight(weight_init_std)

        # Create layers
        activation_layer = {"sigmoid": Sigmoid, "relu": Relu}
        self.layers = OrderedDict()

        for idx in range(1, self.hidden_layer_num + 1):
            self.layers["Affine" + str(idx)] = Affine(
                self.params["W" + str(idx)], self.params["b" + str(idx)]
            )

            if self.use_batchnorm:
                self.params["gamma" + str(idx)] = np.ones(hidden_size_list[idx - 1])
                self.params["beta" + str(idx)] = np.zeros(hidden_size_list[idx - 1])
                self.layers["BatchNorm" + str(idx)] = BatchNormalization(
                    self.params["gamma" + str(idx)], self.params["beta" + str(idx)]
                )

            self.layers["Activation_function" + str(idx)] = activation_layer[activation]()

            if self.use_dropout:
                self.layers["Dropout" + str(idx)] = Dropout(dropout_ratio)

        idx = self.hidden_layer_num + 1
        self.layers["Affine" + str(idx)] = Affine(
            self.params["W" + str(idx)], self.params["b" + str(idx)]
        )

        self.last_layer = SoftmaxWithLoss()
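
Every constructor on this page ends by assigning a SoftmaxWithLoss output layer defined elsewhere in these projects. A minimal sketch of that layer in the usual book style, assuming one-hot teacher labels:

    class SoftmaxWithLoss:
        """Softmax activation fused with cross-entropy loss."""
        def __init__(self):
            self.y = None  # softmax output
            self.t = None  # teacher labels (one-hot)

        def forward(self, x, t):
            self.t = t
            x = x - x.max(axis=1, keepdims=True)  # stabilize the exponentials
            self.y = np.exp(x) / np.exp(x).sum(axis=1, keepdims=True)
            batch_size = x.shape[0]
            return -np.sum(t * np.log(self.y + 1e-7)) / batch_size

        def backward(self, dout=1):
            batch_size = self.t.shape[0]
            return dout * (self.y - self.t) / batch_size  # gradient of softmax + cross-entropy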
Example #5
File: nn.py Project: hiranotnsk2/SA
    def __init__(self, input_dim=(1, 28, 28), 
                 conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
                 hidden_size=100, output_size=10, weight_init_std=0.01):
        """
        input_size : 入力の配列形状(チャンネル数、画像の高さ、画像の幅)
        conv_param : 畳み込みの条件, dict形式  例、{'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1}
        hidden_size : 隠れ層のノード数
        output_size : 出力層のノード数
        weight_init_std : 重みWを初期化する際に用いる標準偏差
        """
                
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2*filter_pad) // filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))
        # NOTE: conv_output_size and pool_output_size are derived from conv_param but are
        # never used below; the layer stack hard-codes a six-conv architecture instead.

        # Weight initialization
        self.params = {}
        std = weight_init_std
        self.params['W1_1'] = std * np.random.randn(16, 1, 3, 3)
        self.params['b1_1'] = np.zeros(16)
        self.params['W1_3'] = std * np.random.randn(16, 16, 3, 3)
        self.params['b1_3'] = np.zeros(16)

        self.params['W2_1'] = std * np.random.randn(32, 16, 3, 3)
        self.params['b2_1'] = np.zeros(32)
        self.params['W2_3'] = std * np.random.randn(32, 32, 3, 3)
        self.params['b2_3'] = np.zeros(32)

        self.params['W3_1'] = std * np.random.randn(64, 32, 3, 3)
        self.params['b3_1'] = np.zeros(64)
        self.params['W3_3'] = std * np.random.randn(64, 64, 3, 3)
        self.params['b3_3'] = np.zeros(64)
        
        
        self.params['W4_1'] = std * np.random.randn(64*4*4, hidden_size)
        self.params['b4_1'] = np.zeros(hidden_size)
        self.params['W5_1'] = std * np.random.randn(hidden_size, output_size)
        self.params['b5_1'] = np.zeros(output_size)

        # Layer construction
        self.layers = OrderedDict()
        self.layers['Conv1_1'] = Convolution(self.params['W1_1'], self.params['b1_1'],1,1)
        self.layers['ReLU1_2'] = ReLU()
        self.layers['Conv1_3'] = Convolution(self.params['W1_3'], self.params['b1_3'],1,1)
        self.layers['ReLU1_4'] = ReLU()
        self.layers['Pool1_5'] = MaxPooling(pool_h=2, pool_w=2, stride=2)

        self.layers['Conv2_1'] = Convolution(self.params['W2_1'], self.params['b2_1'],1,1)
        self.layers['ReLU2_2'] = ReLU()
        self.layers['Conv2_3'] = Convolution(self.params['W2_3'], self.params['b2_3'],1,2)
        self.layers['ReLU2_4'] = ReLU()
        self.layers['Pool2_5'] = MaxPooling(pool_h=2, pool_w=2, stride=2)

        self.layers['Conv3_1'] = Convolution(self.params['W3_1'], self.params['b3_1'],1,1)
        self.layers['ReLU3_2'] = ReLU()
        self.layers['Conv3_3'] = Convolution(self.params['W3_3'], self.params['b3_3'],1,1)
        self.layers['ReLU3_4'] = ReLU()
        self.layers['Pool3_5'] = MaxPooling(pool_h=2, pool_w=2, stride=2)        
        
        
        self.layers['Affine4_1'] = Affine(self.params['W4_1'], self.params['b4_1'])
        self.layers['ReLU4_2'] = ReLU()        
        self.layers['Dropout4_3'] = Dropout(0.5)
        
        self.layers['Affine5_1'] = Affine(self.params['W5_1'], self.params['b5_1'])
        self.layers['Dropout5_2'] = Dropout(0.5)

        self.last_layer = SoftmaxWithLoss()
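
For reference, the size formula at the top of this constructor with its default conv_param (filter 5, pad 0, stride 1 on a 28-pixel input) gives:

    conv_output_size = (28 - 5 + 2*0) // 1 + 1        # 24
    pool_output_size = int(30 * (24 / 2) * (24 / 2))  # 30 * 12 * 12 = 4320

The hard-coded six-conv stack above ignores these defaults, which is why the first Affine uses 64*4*4 instead.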
Example #6
    def __init__(self,
                 input_dim=(1, 28, 28),
                 use_conv2=True,
                 use_affine2=True,
                 conv_param={
                     'filter_num': 128,
                     'filter_size': 3,
                     'pad': 1,
                     'stride': 1
                 },
                 pool_param={
                     'pool_size': 2,
                     'pad': 1,
                     'stride': 2
                 },
                 conv_param2={
                     'filter_num2': 128,
                     'filter_size2': 3,
                     'pad2': 1,
                     'stride2': 1
                 },
                 pool_param2={
                     'pool_size2': 2,
                     'pad2': 1,
                     'stride2': 2
                 },
                 hidden_size=128,
                 hidden_size2=128,
                 output_size=15,
                 weight_init_std=0.01,
                 use_batchnorm_C1=False,
                 use_batchnorm_C2=False,
                 use_batchnorm_A1=False,
                 use_batchnorm_A2=False,
                 use_dropout_A1=False,
                 dropout_ratio_A1=0.5,
                 use_dropout_A2=False,
                 dropout_ratio_A2=0.5,
                 use_succession=False,
                 data_num=1,
                 prediction_mode=False):

        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']

        pool_size = pool_param['pool_size']
        pool_pad = pool_param['pad']
        pool_stride = pool_param['stride']

        filter_num2 = conv_param2['filter_num2']
        filter_size2 = conv_param2['filter_size2']
        filter_pad2 = conv_param2['pad2']
        filter_stride2 = conv_param2['stride2']

        pool_size2 = pool_param2['pool_size2']
        pool_pad2 = pool_param2['pad2']
        pool_stride2 = pool_param2['stride2']

        input_size = input_dim[1]
        conv_output_size = (input_size + 2 * filter_pad - filter_size
                            ) // filter_stride + 1  # output size after convolution (same for H and W)
        pool_output_size = (conv_output_size + 2 * pool_pad -
                            pool_size) // pool_stride + 1  # output size after pooling (same for H and W)
        pool_output_pixel = filter_num * pool_output_size * pool_output_size  # total number of pixels after pooling

        input_size2 = pool_output_size
        conv_output_size2 = (input_size2 + 2 * filter_pad2 - filter_size2
                             ) // filter_stride2 + 1  # output size after convolution (same for H and W)
        pool_output_size2 = (conv_output_size2 + 2 * pool_pad2 - pool_size2
                             ) // pool_stride2 + 1  # output size after pooling (same for H and W)
        pool_output_pixel2 = filter_num2 * pool_output_size2 * pool_output_size2  # total number of pixels after pooling
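        # Worked example with the defaults above: conv1 gives (28 + 2*1 - 3)//1 + 1 = 28,
        # pool1 gives (28 + 2*1 - 2)//2 + 1 = 15, so pool_output_pixel = 128*15*15 = 28800;
        # conv2 gives (15 + 2*1 - 3)//1 + 1 = 15 and pool2 gives (15 + 2*1 - 2)//2 + 1 = 8,
        # so pool_output_pixel2 = 128*8*8 = 8192.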

        self.use_conv2 = use_conv2
        self.use_affine2 = use_affine2
        self.use_batchnorm_C1 = use_batchnorm_C1
        self.use_batchnorm_C2 = use_batchnorm_C2
        self.use_batchnorm_A1 = use_batchnorm_A1
        self.use_batchnorm_A2 = use_batchnorm_A2
        self.use_dropout_A1 = use_dropout_A1
        self.use_dropout_A2 = use_dropout_A2
        self.dropout_ratio_A1 = dropout_ratio_A1
        self.dropout_ratio_A2 = dropout_ratio_A2
        self.use_succession = use_succession
        self.data_num = data_num
        self.prediction_mode = prediction_mode

        self.params = {}
        self.paramsB = {}
        std = weight_init_std

        if self.use_succession:
            #---------- load the weights from pickle ----------
            with open("params_" + str(self.data_num) + ".pickle", "rb") as f:
                params_s = pickle.load(f)
            with open("params_BN" + str(self.data_num) + ".pickle", "rb") as f:
                params_BN = pickle.load(f)

            self.params['W1'] = params_s['W1']  # W1 holds the convolution filter weights
            self.params['b1'] = params_s['b1']
            if self.use_batchnorm_C1:
                self.paramsB["BC1_moving_mean"] = params_BN["BC1_moving_mean"]
                self.paramsB["BC1_moving_var"] = params_BN["BC1_moving_var"]

            if self.use_conv2:
                self.params['W1_2'] = params_s['W1_2']
                self.params['b1_2'] = params_s['b1_2']
                if self.use_batchnorm_C2:
                    self.paramsB["BC2_moving_mean"] = params_BN[
                        "BC2_moving_mean"]
                    self.paramsB["BC2_moving_var"] = params_BN[
                        "BC2_moving_var"]

            self.params['W2'] = params_s['W2']
            self.params['b2'] = params_s['b2']

            if self.use_batchnorm_A1:
                self.paramsB["BA1_moving_mean"] = params_BN["BA1_moving_mean"]
                self.paramsB["BA1_moving_var"] = params_BN["BA1_moving_var"]

            if self.use_affine2:
                self.params['W2_2'] = params_s['W2_2']
                self.params['b2_2'] = params_s['b2_2']
                if self.use_batchnorm_A2:
                    self.paramsB["BA2_moving_mean"] = params_BN[
                        "BA2_moving_mean"]
                    self.paramsB["BA2_moving_var"] = params_BN[
                        "BA2_moving_var"]

            self.params['W3'] = params_s['W3']
            self.params['b3'] = params_s['b3']

            #---------- end: weights loaded from pickle ----------
        else:
            # Weight initialization
            #---- layer 1: Conv ----
            self.params['W1'] = std * np.random.randn(
                filter_num, input_dim[0], filter_size,
                filter_size)  # W1 holds the convolution filter weights
            self.params['b1'] = np.zeros(filter_num)  # b1 holds the convolution filter biases

            #---- layer 2: Conv ----
            if self.use_conv2:
                self.params['W1_2'] = std * np.random.randn(
                    filter_num2, filter_num, filter_size2,
                    filter_size2)  #----- added -----
                self.params['b1_2'] = np.zeros(filter_num2)  #----- added -----

                #---- layer 3: Affine ----
                self.params['W2'] = std * np.random.randn(
                    pool_output_pixel2, hidden_size)
            else:
                self.params['W2'] = std * np.random.randn(
                    pool_output_pixel, hidden_size)
            self.params['b2'] = np.zeros(hidden_size)

            #---- layer 4: Affine ----
            if self.use_affine2:
                self.params['W2_2'] = std * np.random.randn(
                    hidden_size, hidden_size2)  #----- added -----
                self.params['b2_2'] = np.zeros(hidden_size2)  #----- added -----

                #---- layer 5: output ----
                self.params['W3'] = std * np.random.randn(
                    hidden_size2, output_size)  #-- changed --
            else:
                self.params['W3'] = std * np.random.randn(
                    hidden_size, output_size)  #-- changed --
            self.params['b3'] = np.zeros(output_size)

        # Layer construction
        self.layers = OrderedDict()
        #---- layer 1: Conv ----
        self.layers['Conv1'] = Convolution(
            self.params['W1'], self.params['b1'], conv_param['stride'],
            conv_param['pad'])  # W1 is the convolution filter weights, b1 the filter biases
        if self.use_batchnorm_C1:
            batch_num = conv_output_size * conv_output_size
            if self.prediction_mode:
                self.layers['BatchNormalization_C1'] = BatchNormalization(
                    np.ones(batch_num),   # gamma/beta shaped as in the training branch below
                    np.zeros(batch_num),
                    moving_mean=self.paramsB["BC1_moving_mean"],
                    moving_var=self.paramsB["BC1_moving_var"])
            else:
                self.layers['BatchNormalization_C1'] = BatchNormalization(
                    np.ones(batch_num),
                    np.zeros(batch_num),
                    DataNum=self.data_num,
                    LayerNum="C1")
                self.paramsB["BC1_moving_mean"] = self.layers[
                    'BatchNormalization_C1'].moving_mean
                self.paramsB["BC1_moving_var"] = self.layers[
                    'BatchNormalization_C1'].moving_var
        self.layers['ReLU1'] = ReLU()
        self.layers['Pool1'] = MaxPooling(pool_h=pool_size,
                                          pool_w=pool_size,
                                          stride=pool_stride,
                                          pad=pool_pad)

        #---- layer 2: Conv ----
        if self.use_conv2:
            self.layers['Conv1_2'] = Convolution(
                self.params['W1_2'], self.params['b1_2'],
                conv_param2['stride2'], conv_param2['pad2'])  #----- added -----
            if self.use_batchnorm_C2:
                batch_num2 = conv_output_size2 * conv_output_size2 * filter_num2
                if self.prediction_mode:
                    self.layers['BatchNormalization_C2'] = BatchNormalization(
                        np.ones(batch_num2),
                        np.zeros(batch_num2),
                        moving_mean=self.paramsB["BC2_moving_mean"],
                        moving_var=self.paramsB["BC2_moving_var"])
                else:
                    self.layers['BatchNormalization_C2'] = BatchNormalization(
                        np.ones(batch_num2),
                        np.zeros(batch_num2),
                        DataNum=self.data_num,
                        LayerNum="C2")
                    self.paramsB["BC2_moving_mean"] = self.layers[
                        'BatchNormalization_C2'].moving_mean
                    self.paramsB["BC2_moving_var"] = self.layers[
                        'BatchNormalization_C2'].moving_var
            self.layers['ReLU1_2'] = ReLU()  #----- added -----
            self.layers['Pool1_2'] = MaxPooling(pool_h=pool_size2,
                                                pool_w=pool_size2,
                                                stride=pool_stride2,
                                                pad=pool_pad2)  #----- added -----

        #---- layer 3: Affine ----
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        if self.use_batchnorm_A1:
            if self.prediction_mode:
                self.layers['BatchNormalization_A1'] = BatchNormalization(
                    np.ones(hidden_size),
                    np.zeros(hidden_size),
                    moving_mean=self.paramsB["BA1_moving_mean"],
                    moving_var=self.paramsB["BA1_moving_var"])
            else:
                self.layers['BatchNormalization_A1'] = BatchNormalization(
                    np.ones(hidden_size),
                    np.zeros(hidden_size),
                    DataNum=self.data_num,
                    LayerNum="A1")
                self.paramsB["BA1_moving_mean"] = self.layers[
                    'BatchNormalization_A1'].moving_mean
                self.paramsB["BA1_moving_var"] = self.layers[
                    'BatchNormalization_A1'].moving_var

        if self.use_dropout_A1:
            self.layers['DropoutA1'] = Dropout(self.dropout_ratio_A1)
        self.layers['ReLU2'] = ReLU()

        # ---- layer 4: Affine ----
        if self.use_affine2:
            self.layers['Affine2'] = Affine(
                self.params['W2_2'], self.params['b2_2'])  #----- added -----
            if self.use_batchnorm_A2:
                if self.prediction_mode:
                    self.layers['BatchNormalization_A2'] = BatchNormalization(
                        np.ones(hidden_size2),
                        np.zeros(hidden_size2),
                        moving_mean=self.paramsB["BA2_moving_mean"],
                        moving_var=self.paramsB["BA2_moving_var"])
                else:
                    self.layers['BatchNormalization_A2'] = BatchNormalization(
                        np.ones(hidden_size2),
                        np.zeros(hidden_size2),
                        DataNum=self.data_num,
                        LayerNum="A2")
                    self.paramsB["BA2_moving_mean"] = self.layers[
                        'BatchNormalization_A2'].moving_mean
                    self.paramsB["BA2_moving_var"] = self.layers[
                        'BatchNormalization_A2'].moving_var

            if self.use_dropout_A2:
                self.layers['DropoutA2'] = Dropout(self.dropout_ratio_A2)
            self.layers['ReLU3'] = ReLU()  #----- added -----

        #---- layer 5: output ----
        self.layers['Affine3'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = SoftmaxWithLoss()
    def __init__(self,
                 input_dim=(1, 28, 28),
                 conv_param_1=None,
                 conv_param_2=None,
                 conv_param_3=None,
                 conv_param_4=None,
                 conv_param_5=None,
                 conv_param_6=None,
                 hidden_size=50,
                 output_size=10):
        # Conv layer 1: input 1x28x28, output 16x28x28
        if conv_param_1 is None:
            conv_param_1 = {
                'filter_num': 16,
                'filter_size': 3,
                'pad': 1,
                'stride': 1
            }
        # Conv layer 2: input 16x28x28, output 16x28x28
        if conv_param_2 is None:
            conv_param_2 = {
                'filter_num': 16,
                'filter_size': 3,
                'pad': 1,
                'stride': 1
            }
        # Conv layer 2 is followed by a 2x2 max-pooling layer with stride 2, halving height and width
        # Conv layer 3: input 16x14x14, output 32x14x14
        if conv_param_3 is None:
            conv_param_3 = {
                'filter_num': 32,
                'filter_size': 3,
                'pad': 1,
                'stride': 1
            }
        # Conv layer 4: input 32x14x14, but with pad=2 the output is 32x16x16
        if conv_param_4 is None:
            conv_param_4 = {
                'filter_num': 32,
                'filter_size': 3,
                'pad': 2,
                'stride': 1
            }
        # Conv layer 4 is followed by a 2x2 max-pooling layer with stride 2, halving height and width
        # Conv layer 5: input 32x8x8, output 64x8x8
        if conv_param_5 is None:
            conv_param_5 = {
                'filter_num': 64,
                'filter_size': 3,
                'pad': 1,
                'stride': 1
            }
        # Conv layer 6: input 64x8x8, output 64x8x8
        if conv_param_6 is None:
            conv_param_6 = {
                'filter_num': 64,
                'filter_size': 3,
                'pad': 1,
                'stride': 1
            }
        """
        卷积层的每个节点只与前一层的filter_size个节点连接,
        即本层卷积层的卷积核 高x宽有多少,就和前一层的多少个节点连接。
        如果有多个通道,那还要乘上通道数(深度)
        这里的所有卷积层都用3x3的大小
        
        各层输出如下:
        卷积层1:              16 28 28
        卷积层2 | 池化层1:    16 28 28 | 16 14 14
        卷积层3:              32 14 14
        卷积层4 | 池化层2:    32 16 16 | 32 8 8
        卷积层5:              64 8 8
        卷积层6:              64 8 8 | 64 4 4
        """
        pre_node_nums = np.array([
            1 * 3 * 3,   # conv1: the previous layer (input) has 1 channel
            16 * 3 * 3,  # conv2: the previous layer (conv1) has 16 channels
            16 * 3 * 3,  # conv3: the previous layer (conv2) has 16 channels
            32 * 3 * 3,  # conv4: the previous layer (conv3) has 32 channels
            32 * 3 * 3,  # conv5: the previous layer (conv4) has 32 channels
            64 * 3 * 3,  # conv6: the previous layer (conv5) has 64 channels

            # hidden layer: the preceding pool output must be flattened to 1-D before
            # the fully connected layer, so its fan-in is the pool's total output size
            64 * 4 * 4,
            # output layer: fully connected to every node of the hidden layer, i.e. hidden_size
            hidden_size
        ])

        # Standard deviation used for weight initialization. He initialization is used
        # because the activation function is ReLU.
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)
        """Initialize the weight parameters and biases"""
        self.params = {}
        pre_channel_num = input_dim[0]  # track the previous layer's channel count (= the filter's channel count)
        for idx, conv_param in enumerate([
                conv_param_1, conv_param_2, conv_param_3, conv_param_4,
                conv_param_5, conv_param_6
        ]):
            # conv filter shape: (filter_num, channels, height, width)
            self.params['W'+str(idx+1)] = weight_init_scales[idx] *\
                                        np.random.randn(
                                            conv_param['filter_num'],
                                            pre_channel_num,
                                            conv_param['filter_size'],
                                            conv_param['filter_size'])
            self.params['b' + str(idx + 1)] = np.zeros(
                conv_param['filter_num'])

            pre_channel_num = conv_param['filter_num']  # update the previous layer's channel count

        self.params['W7'] = weight_init_scales[6] * np.random.randn(
            64 * 4 * 4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(
            hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)
        """
        构造神经网络:
        书上没有用到之前用的有序字典,其实我觉得很好用,所以就实现了有序字典版本
        Conv1->ReLU1->Conv2->ReLU2->Pool1->
        Conv3->ReLU3->Conv4->ReLU4->Pool2->
        Conv5->ReLU5->Conv6->ReLU6->Pool3->
        Affine1(Hidden Layer1)->ReLU7->Dropout1->
        Affine2(Output Layer1)->Dropout2------->SoftmaxWithLoss
        """
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'],
                                           stride=conv_param_1['stride'],
                                           pad=conv_param_1['pad'])
        self.layers['ReLU1'] = ReLU()
        self.layers['Conv2'] = Convolution(self.params['W2'],
                                           self.params['b2'],
                                           stride=conv_param_2['stride'],
                                           pad=conv_param_2['pad'])
        self.layers['ReLU2'] = ReLU()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2, pad=0)
        self.layers['Conv3'] = Convolution(self.params['W3'],
                                           self.params['b3'],
                                           stride=conv_param_3['stride'],
                                           pad=conv_param_3['pad'])
        self.layers['ReLU3'] = ReLU()
        self.layers['Conv4'] = Convolution(self.params['W4'],
                                           self.params['b4'],
                                           stride=conv_param_4['stride'],
                                           pad=conv_param_4['pad'])
        self.layers['ReLU4'] = ReLU()
        self.layers['Pool2'] = Pooling(pool_h=2, pool_w=2, stride=2, pad=0)
        self.layers['Conv5'] = Convolution(self.params['W5'],
                                           self.params['b5'],
                                           stride=conv_param_5['stride'],
                                           pad=conv_param_5['pad'])
        self.layers['ReLU5'] = ReLU()
        self.layers['Conv6'] = Convolution(self.params['W6'],
                                           self.params['b6'],
                                           stride=conv_param_6['stride'],
                                           pad=conv_param_6['pad'])
        self.layers['ReLU6'] = ReLU()
        self.layers['Pool3'] = Pooling(pool_h=2, pool_w=2, stride=2, pad=0)
        self.layers['Affine1'] = Affine(self.params['W7'], self.params['b7'])
        self.layers['ReLU7'] = ReLU()
        self.layers['Dropout1'] = Dropout(dropout_ratio=0.5)
        self.layers['Affine2'] = Affine(self.params['W8'], self.params['b8'])
        self.layers['Dropout2'] = Dropout(dropout_ratio=0.5)

        self.last_layer = SoftmaxWithLoss()
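
A hedged usage sketch for this last network, assuming the class is named DeepConvNet and exposes the usual book-style loss(x, t) method (not shown in this excerpt):

    # Hypothetical usage; the class name and method signature are assumptions.
    network = DeepConvNet(input_dim=(1, 28, 28), hidden_size=50, output_size=10)
    x = np.random.rand(4, 1, 28, 28)             # dummy batch of four MNIST-sized images
    t = np.eye(10)[np.random.randint(0, 10, 4)]  # one-hot dummy labels
    print(network.loss(x, t))                    # forward pass through all layers + SoftmaxWithLoss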