Example #1: a binary AlexNet built from BMXNet's QConv2D/QDense Gluon layers
import mxnet as mx
from mxnet.gluon import nn, HybridBlock


class AlexNet(HybridBlock):
    # QConv2D/QDense are binarized layers from the BMXNet fork of Gluon;
    # the first convolution and the output classifier stay full-precision.
    def __init__(self, classes=1000, **kwargs):
        super(AlexNet, self).__init__(**kwargs)
        with self.name_scope():
            self.features = nn.HybridSequential(prefix='')
            with self.features.name_scope():
                self.features.add(
                    nn.Conv2D(64,
                              kernel_size=11,
                              strides=4,
                              padding=2,
                              activation='relu'))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
                self.features.add(nn.QConv2D(192, kernel_size=5, padding=2))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
                self.features.add(nn.QConv2D(384, kernel_size=3, padding=1))
                self.features.add(nn.QConv2D(256, kernel_size=3, padding=1))
                self.features.add(nn.QConv2D(256, kernel_size=3, padding=1))
                self.features.add(nn.MaxPool2D(pool_size=3, strides=2))
                self.features.add(nn.Flatten())
                self.features.add(nn.QDense(4096))
                self.features.add(nn.Dropout(0.5))
                self.features.add(nn.QDense(4096))
                self.features.add(nn.Dropout(0.5))

            self.output = nn.Dense(classes)
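
A minimal usage sketch, assuming the class above is completed with a hybrid_forward that chains self.features into self.output (that method is elided from the excerpt) and that the BMXNet fork of Gluon providing QConv2D/QDense is installed:

import mxnet as mx

net = AlexNet(classes=1000)
net.initialize(mx.init.Xavier())
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))  # standard AlexNet input size
out = net(x)
print(out.shape)  # expected: (1, 1000)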
Example #2: a binary VGG variant with a QDense classifier head
import mxnet as mx
from mxnet.gluon import nn, HybridBlock


class VGG(HybridBlock):
    def __init__(self,
                 layers,
                 filters,
                 classes=1000,
                 batch_norm=False,
                 **kwargs):
        super(VGG, self).__init__(**kwargs)
        # one entry in `filters` per stage in `layers`
        assert len(layers) == len(filters)
        with self.name_scope():
            self.features = self._make_features(layers, filters, batch_norm)
            # binarized classifier head: two QDense layers with dropout
            self.features.add(
                nn.QDense(4096,
                          weight_initializer='normal',
                          bias_initializer='zeros'))
            self.features.add(nn.Dropout(rate=0.5))
            self.features.add(
                nn.QDense(4096,
                          weight_initializer='normal',
                          bias_initializer='zeros'))
            self.features.add(nn.Dropout(rate=0.5))
            # output layer stays full-precision
            self.output = nn.Dense(classes,
                                   weight_initializer='normal',
                                   bias_initializer='zeros')
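
The excerpt relies on self._make_features, which is not shown. A plausible sketch modeled on the stock Gluon VGG implementation (the BMXNet version may differ, e.g. by using QConv2D for the inner stages):

def _make_features(self, layers, filters, batch_norm):
    # VGG trunk: layers[i] 3x3 convolutions with filters[i] channels per
    # stage, each stage closed by a 2x2 max-pooling
    featurizer = nn.HybridSequential(prefix='')
    for i, num in enumerate(layers):
        for _ in range(num):
            featurizer.add(nn.Conv2D(filters[i], kernel_size=3, padding=1))
            if batch_norm:
                featurizer.add(nn.BatchNorm())
            featurizer.add(nn.Activation('relu'))
        featurizer.add(nn.MaxPool2D(strides=2))
    return featurizer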
Example #3: a unit test for the BinaryInferenceFullyConnected operator
import mxnet as mx
import numpy as np
from mxnet import ndarray as F
from mxnet.gluon import nn

# `get_binary_col` is a bit-packing helper defined in the surrounding
# test module (see the sketch after this example)
def test_binary_inference_fc():
    # setup data
    batch_size = 1
    bits_binary_word = 32
    num_hidden_fc = 10
    num_input_features = 1024
    input_data = mx.nd.random.normal(-1,
                                     1,
                                     shape=(batch_size, num_input_features))
    weight = mx.nd.random.normal(-1,
                                 1,
                                 shape=(num_hidden_fc, num_input_features))

    # input_npy = (np.sign(input_data.asnumpy()).flatten() + 1) / 2
    # weight_npy = (np.sign(weight.asnumpy()).flatten() + 1) / 2
    # result = 0
    # for i in range(len(weight_npy)):
    #     result += 0 if (input_npy[i] + weight_npy[i]) == 1 else 1

    # weights concatenation: pack the sign bits of the transposed weight
    # matrix into 32-bit words, one packed row per hidden unit
    weight_T = weight.T
    size_binary_col = int(weight_T.size / bits_binary_word)
    weight_concatenated = np.zeros(size_binary_col, dtype='uint32')
    weight_concatenated = get_binary_col(weight_T.reshape((-1)),
                                         weight_concatenated,
                                         weight_T.shape[0],
                                         weight_T.shape[1],
                                         bits_binary_word)
    weight_concatenated = mx.nd.array(weight_concatenated, dtype='float64')
    weight_concatenated = weight_concatenated.reshape((weight_T.shape[1], -1))
    assert weight_concatenated.shape[0] == num_hidden_fc
    assert weight_concatenated.shape[1] == num_input_features // bits_binary_word
    # run the binary inference fc operator twice on the same inputs; the
    # assertion at the end checks that its output is deterministic
    binary_infer_result = mx.ndarray.BinaryInferenceFullyConnected(
        data=input_data, weight=weight_concatenated, num_hidden=num_hidden_fc)

    binary_infer_result2 = mx.ndarray.BinaryInferenceFullyConnected(
        data=input_data, weight=weight_concatenated, num_hidden=num_hidden_fc)

    # build a QDense reference path; note that qdense_result is computed
    # but never compared against the binary operator in this excerpt
    qdense_layer = nn.QDense(num_hidden_fc)
    qact = nn.QActivation(bits=1)
    qact_result = qact.forward(input_data)
    qdense_result = qdense_layer.hybrid_forward(F,
                                                x=qact_result,
                                                weight=weight)

    np.testing.assert_equal(binary_infer_result.asnumpy(),
                            binary_infer_result2.asnumpy())
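
get_binary_col itself is defined elsewhere in the test suite. Conceptually it collapses each group of 32 sign bits into one uint32; a minimal sketch of that idea (the name pack_signs and the LSB-first bit order are assumptions, not necessarily BMXNet's exact layout):

import numpy as np

def pack_signs(flat, bits=32):
    # pack sign bits (value >= 0 -> 1, value < 0 -> 0) into uint32 words,
    # least-significant bit first within each word
    assert flat.size % bits == 0
    packed = np.zeros(flat.size // bits, dtype='uint32')
    for i, v in enumerate(flat):
        if v >= 0:
            packed[i // bits] |= np.uint32(1 << (i % bits))
    return packed

print(pack_signs(np.random.randn(64)).shape)  # 64 values -> (2,) packed words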
Example #4: VGG with a switchable binary classifier head (isBin flag)
import mxnet as mx
from mxnet.gluon import nn, HybridBlock


class VGG(HybridBlock):
    def __init__(self,
                 layers,
                 filters,
                 classes=1000,
                 batch_norm=True,
                 isBin=False,
                 step=0,
                 **kwargs):
        super(VGG, self).__init__(**kwargs)
        assert len(layers) == len(filters)
        with self.name_scope():
            self.features = self._make_features(layers, filters, batch_norm,
                                                step)
            self.features.add(nn.Flatten())
            # binarized variant: the first fully connected layer is a QDense
            if isBin:
                self.features.add(
                    nn.QDense(4096,
                              weight_initializer='normal',
                              bias_initializer='zeros'))
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.Dropout(rate=0.5))
                self.features.add(
                    nn.Dense(4096,
                             weight_initializer='normal',
                             bias_initializer='zeros'))
                self.features.add(nn.Activation('relu'))
            else:
                self.features.add(
                    nn.Dense(4096,
                             weight_initializer='normal',
                             bias_initializer='zeros'))
                self.features.add(nn.Activation('relu'))
                self.features.add(nn.Dropout(rate=0.5))
                self.features.add(
                    nn.Dense(4096,
                             weight_initializer='normal',
                             bias_initializer='zeros'))
                self.features.add(nn.Activation('relu'))

            self.features.add(nn.Dropout(rate=0.5))
            self.output = nn.Dense(classes,
                                   weight_initializer='normal',
                                   bias_initializer='zeros')
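
A usage sketch with a VGG-11-style configuration; the concrete layers/filters lists and the meaning of the step argument come from the surrounding module and are assumptions here:

layers = [1, 1, 2, 2, 2]            # conv layers per stage (VGG-11)
filters = [64, 128, 256, 512, 512]

binary_net = VGG(layers, filters, isBin=True)   # QDense classifier head
fp_net = VGG(layers, filters, isBin=False)      # full-precision head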
Example #5: an MNIST-style training script that selects QDense or Dense layers by bit width
import argparse

import mxnet as mx
import numpy as np
from mxnet.gluon import nn

# only the tail of the argument parser is shown in this excerpt
parser = argparse.ArgumentParser()
parser.add_argument('--momentum', type=float, default=0.9,
                    help='SGD momentum (default: 0.9)')
parser.add_argument('--cuda', action='store_true', default=False,
                    help='Train on GPU with CUDA')
parser.add_argument('--bits', type=int, default=1,
                    help='Number of bits for binarization/quantization')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
                    help='how many batches to wait before logging training status')
opt = parser.parse_args()


# define network
net = nn.HybridSequential()
with net.name_scope():
    if opt.bits == 1:
        # 1-bit case: use BMXNet's binarized QDense layers
        net.add(nn.QDense(128, bits=opt.bits))
        net.add(nn.QDense(64, bits=opt.bits))
        net.add(nn.QDense(10, bits=opt.bits))
    elif opt.bits < 32:
        raise RuntimeError("Quantization not yet supported")
    else:
        # 32-bit case: full-precision baseline
        net.add(nn.Dense(128, activation='relu'))
        net.add(nn.Dense(64, activation='relu'))
        net.add(nn.Dense(10))
net.hybridize()

# data

def transformer(data, label):
    # flatten the image and scale pixel values to [0, 1]
    data = data.reshape((-1,)).astype(np.float32)/255
    return data, label
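
The script is cut off here. A sketch of how these pieces are typically wired together in the stock Gluon MNIST example, which this excerpt appears to follow (opt.batch_size and opt.lr are assumed to be defined by the truncated part of the parser):

from mxnet import gluon

ctx = mx.gpu() if opt.cuda else mx.cpu()
net.initialize(mx.init.Xavier(), ctx=ctx)

train_data = gluon.data.DataLoader(
    gluon.data.vision.MNIST('./data', train=True, transform=transformer),
    batch_size=opt.batch_size, shuffle=True)

trainer = gluon.Trainer(net.collect_params(), 'sgd',
                        {'learning_rate': opt.lr, 'momentum': opt.momentum})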