예제 #1
0
    def build_model(
            self,
            batch_size=100,
            input_size=784,
            hidden1=100,
            hidden2=100,
            out_classes=10,
            quant_param_path='../../mnist_mlp_quant_param.npz'):
        """Build a three-layer MLP (fc-relu-fc-relu-fc-softmax) via pycnml.

        Quantization parameters for every fully-connected layer were
        precomputed and saved to *quant_param_path*; the 'input' params are
        consumed in network order when layers are created, and the 'filter'
        params are consumed in the same order when weights are loaded.

        Args:
            batch_size: number of samples per forward pass.
            input_size: flattened input feature count (784 for MNIST).
            hidden1, hidden2: output sizes of the first two fc layers.
            out_classes: number of output classes.
            quant_param_path: path to the .npz file with quant params.
        """
        self.batch_size = batch_size
        self.out_classes = out_classes

        # The .npz arrays store flattened (position, scale) pairs, hence
        # the stride-2 walk; build a QuantParam from each pair.
        quant_data = np.load(quant_param_path)
        for key, target in (('input', self.input_quant_params),
                            ('filter', self.filter_quant_params)):
            raw = quant_data[key]
            for idx in range(0, len(raw), 2):
                target.append(
                    pycnml.QuantParam(int(raw[idx]), float(raw[idx + 1])))

        # Assemble the network; each fc layer takes the next input quant
        # param in order.
        self.net.setInputShape(batch_size, input_size, 1, 1)
        self.net.createMlpLayer('fc1', hidden1, self.input_quant_params[0])
        self.net.createReLuLayer('relu1')
        self.net.createMlpLayer('fc2', hidden2, self.input_quant_params[1])
        self.net.createReLuLayer('relu2')
        self.net.createMlpLayer('fc3', self.out_classes,
                                self.input_quant_params[2])
        self.net.createSoftmaxLayer('sf', axis=1)
예제 #2
0
    def build_model(self,
                    param_path='../../imagenet-vgg-verydeep-19.mat',
                    quant_param_path='../../vgg19_quant_param_new.npz'):
        """Build the VGG19 network with pycnml layers.

        The blank placeholder lines in the original were unresolved names
        (NameError at runtime); they are filled in here following the VGG19
        topology: five conv stages (64/128/256/512/512 channels, 2/2/4/4/4
        convs each, every conv 3x3 s1 p1 + ReLU, each stage ending in a
        2x2/s2 pooling layer), then flatten and fc6/fc7 (4096) + fc8 (1000).

        Args:
            param_path: pretrained VGG19 weights (.mat), loaded elsewhere.
            quant_param_path: .npz with precomputed quantization params.
        """
        self.param_path = param_path

        # Loading quant params.
        # Before creating layers you would run the net on CPU to obtain the
        # positions/scales used for quantizing input data (pycnml.QuantTool).
        # Only conv and mlp layers need quantization; here the params have
        # already been created and saved to local files. Arrays hold
        # flattened (position, scale) pairs, hence the stride-2 loops.
        params = np.load(quant_param_path)
        input_params = params['input']
        filter_params = params['filter']
        for i in range(0, len(input_params), 2):
            self.input_quant_params.append(pycnml.QuantParam(int(input_params[i]), float(input_params[i + 1])))
        for i in range(0, len(filter_params), 2):
            self.filter_quant_params.append(pycnml.QuantParam(int(filter_params[i]), float(filter_params[i + 1])))

        # creating layers
        self.net.setInputShape(1, 3, 224, 224)
        # conv1_1
        self.net.createConvLayer('conv1_1', 64, 3, 1, 1, 1, self.input_quant_params[0])
        # relu1_1
        self.net.createReLuLayer('relu1_1')
        # conv1_2
        self.net.createConvLayer('conv1_2', 64, 3, 1, 1, 1, self.input_quant_params[1])
        # relu1_2
        self.net.createReLuLayer('relu1_2')
        # pool1
        self.net.createPoolingLayer('pool1', 2, 2)
        # conv2_1
        self.net.createConvLayer('conv2_1', 128, 3, 1, 1, 1, self.input_quant_params[2])
        # relu2_1
        self.net.createReLuLayer('relu2_1')
        # conv2_2
        self.net.createConvLayer('conv2_2', 128, 3, 1, 1, 1, self.input_quant_params[3])
        # relu2_2
        self.net.createReLuLayer('relu2_2')
        # pool2
        self.net.createPoolingLayer('pool2', 2, 2)
        # conv3_1
        self.net.createConvLayer('conv3_1', 256, 3, 1, 1, 1, self.input_quant_params[4])
        # relu3_1
        self.net.createReLuLayer('relu3_1')
        # conv3_2
        self.net.createConvLayer('conv3_2', 256, 3, 1, 1, 1, self.input_quant_params[5])
        # relu3_2
        self.net.createReLuLayer('relu3_2')
        # conv3_3
        self.net.createConvLayer('conv3_3', 256, 3, 1, 1, 1, self.input_quant_params[6])
        # relu3_3
        self.net.createReLuLayer('relu3_3')
        # conv3_4
        self.net.createConvLayer('conv3_4', 256, 3, 1, 1, 1, self.input_quant_params[7])
        # relu3_4
        self.net.createReLuLayer('relu3_4')
        # pool3
        self.net.createPoolingLayer('pool3', 2, 2)
        # conv4_1
        self.net.createConvLayer('conv4_1', 512, 3, 1, 1, 1, self.input_quant_params[8])
        # relu4_1
        self.net.createReLuLayer('relu4_1')
        # conv4_2
        self.net.createConvLayer('conv4_2', 512, 3, 1, 1, 1, self.input_quant_params[9])
        # relu4_2
        self.net.createReLuLayer('relu4_2')
        # conv4_3
        self.net.createConvLayer('conv4_3', 512, 3, 1, 1, 1, self.input_quant_params[10])
        # relu4_3
        self.net.createReLuLayer('relu4_3')
        # conv4_4
        self.net.createConvLayer('conv4_4', 512, 3, 1, 1, 1, self.input_quant_params[11])
        # relu4_4
        self.net.createReLuLayer('relu4_4')
        # pool4
        self.net.createPoolingLayer('pool4', 2, 2)
        # conv5_1
        self.net.createConvLayer('conv5_1', 512, 3, 1, 1, 1, self.input_quant_params[12])
        # relu5_1
        self.net.createReLuLayer('relu5_1')
        # conv5_2
        self.net.createConvLayer('conv5_2', 512, 3, 1, 1, 1, self.input_quant_params[13])
        # relu5_2
        self.net.createReLuLayer('relu5_2')
        # conv5_3
        self.net.createConvLayer('conv5_3', 512, 3, 1, 1, 1, self.input_quant_params[14])
        # relu5_3
        self.net.createReLuLayer('relu5_3')
        # conv5_4
        self.net.createConvLayer('conv5_4', 512, 3, 1, 1, 1, self.input_quant_params[15])
        # relu5_4
        self.net.createReLuLayer('relu5_4')
        # pool5
        self.net.createPoolingLayer('pool5', 2, 2)

        # flatten to a single feature vector for the classifier head
        self.net.createFlattenLayer('flatten', [1, 512 * 7 * 7, 1, 1])
        # fc6
        self.net.createMlpLayer('fc6', 4096, self.input_quant_params[16])
        # relu6
        self.net.createReLuLayer('relu6')
        # fc7
        self.net.createMlpLayer('fc7', 4096, self.input_quant_params[17])
        # relu7
        self.net.createReLuLayer('relu7')
        # fc8
        self.net.createMlpLayer('fc8', 1000, self.input_quant_params[18])
        # softmax
        self.net.createSoftmaxLayer('softmax', 1)
예제 #3
0
    def build_model(self,
                    param_path='../../imagenet-vgg-verydeep-19.mat',
                    quant_param_path='../../vgg19_quant_param_new.npz'):
        """Construct the VGG19 graph on the DLP via pycnml.

        Input quantization parameters (one per conv/mlp layer, in network
        order) are read from *quant_param_path*; the pretrained weights at
        *param_path* are loaded separately.

        Args:
            param_path: pretrained VGG19 weights (.mat).
            quant_param_path: .npz file with precomputed quant params.
        """
        self.param_path = param_path

        # Quant params are stored as flattened (position, scale) pairs;
        # walk each array with stride 2 and rebuild the QuantParam objects.
        # Only conv and mlp layers are quantized; the params were produced
        # ahead of time (e.g. with pycnml.QuantTool on a CPU pass).
        quant = np.load(quant_param_path)
        raw_in = quant['input']
        raw_filt = quant['filter']
        for k in range(0, len(raw_in), 2):
            self.input_quant_params.append(
                pycnml.QuantParam(int(raw_in[k]), float(raw_in[k + 1])))
        for k in range(0, len(raw_filt), 2):
            self.filter_quant_params.append(
                pycnml.QuantParam(int(raw_filt[k]), float(raw_filt[k + 1])))

        self.net.setInputShape(1, 3, 224, 224)

        # Five conv stages described as (stage index, out_channels, #convs).
        # Every conv is 3x3, stride 1, dilation 1, pad 1, followed by a
        # ReLU; each stage ends with a 2x2/stride-2 pooling layer. Layer
        # names ('convS_J', 'reluS_J', 'poolS') match the weight file.
        stages = ((1, 64, 2), (2, 128, 2), (3, 256, 4),
                  (4, 512, 4), (5, 512, 4))
        quant_idx = 0
        for stage, channels, repeats in stages:
            for sub in range(1, repeats + 1):
                self.net.createConvLayer('conv%d_%d' % (stage, sub),
                                         channels, 3, 1, 1, 1,
                                         self.input_quant_params[quant_idx])
                quant_idx += 1
                self.net.createReLuLayer('relu%d_%d' % (stage, sub))
            self.net.createPoolingLayer('pool%d' % stage, 2, 2)

        # Classifier head: flatten the 512x7x7 feature map, then
        # fc6/fc7 (4096 units each, with ReLU) and fc8 (1000 classes).
        self.net.createFlattenLayer('flatten', [1, 512 * 7 * 7, 1, 1])
        self.net.createMlpLayer('fc6', 4096,
                                self.input_quant_params[quant_idx])
        quant_idx += 1
        self.net.createReLuLayer('relu6')
        self.net.createMlpLayer('fc7', 4096,
                                self.input_quant_params[quant_idx])
        quant_idx += 1
        self.net.createReLuLayer('relu7')
        self.net.createMlpLayer('fc8', 1000,
                                self.input_quant_params[quant_idx])
        self.net.createSoftmaxLayer('softmax', 1)