Example #1 (score: 0)
 def GroupConv(self, *args, **kwargs):
     """Run ``brew.group_conv`` with this helper's cuDNN configuration.

     All positional arguments and any extra keyword arguments are forwarded
     unchanged; the helper-level cuDNN settings are supplied from ``self``.
     """
     # Gather the instance-level cuDNN settings once; a duplicate key in
     # ``kwargs`` still raises TypeError at the call, as in a direct call.
     cudnn_settings = dict(
         use_cudnn=self.use_cudnn,
         order=self.order,
         cudnn_exhaustive_search=self.cudnn_exhaustive_search,
         ws_nbytes_limit=self.ws_nbytes_limit,
     )
     return brew.group_conv(self, *args, **cudnn_settings, **kwargs)
Example #2 (score: 0)
 def GroupConv(self, *args, **kwargs):
     """Delegate to ``brew.group_conv``, injecting this helper's cuDNN options
     (``use_cudnn``, ``order``, ``cudnn_exhaustive_search``,
     ``ws_nbytes_limit``) alongside the caller-supplied arguments."""
     return brew.group_conv(
         self, *args,
         cudnn_exhaustive_search=self.cudnn_exhaustive_search,
         ws_nbytes_limit=self.ws_nbytes_limit,
         use_cudnn=self.use_cudnn,
         order=self.order,
         **kwargs)
Example #3 (score: 0)
 def add_group_conv(self, in_filters, out_filters, kernel, stride=1, pad=0, group=1):
     """Append a grouped convolution layer to the model.

     The layer consumes ``self.prev_blob``, is named from the running
     component/conv counters, and becomes the new ``self.prev_blob``,
     which is also returned.
     """
     self.comp_idx += 1
     # Blob name encodes the component count and the index within it.
     blob_name = 'comp_%d_conv_%d' % (self.comp_count, self.comp_idx)
     conv_blob = brew.group_conv(
         self.model,
         self.prev_blob,
         blob_name,
         in_filters,
         out_filters,
         kernel=kernel,
         stride=stride,
         pad=pad,
         group=group,
         no_bias=self.no_bias,
         weight_init=("MSRAFill", {}),
     )
     self.prev_blob = conv_blob
     return conv_blob
Example #4 (score: 0)
 def _MiniAlexNetNoDropout(self, order):
     """Build a scaled-down AlexNet (no dropout layers) with gradients added.

     Constructs a ModelHelper named "alexnet", wires conv/group-conv, LRN,
     pooling and FC stages ending in a softmax + cross-entropy loss, calls
     AddGradientOperators, and returns the model.

     NOTE(review): the ``order`` parameter is accepted but never used inside
     this body — presumably callers expect it to set the data layout; verify.
     """
     # First, AlexNet using the cnn wrapper.
     model = model_helper.ModelHelper(name="alexnet")
     # Stage 1: 3->16 channels, 11x11 kernel, stride 4.
     conv1 = brew.conv(
         model,
         "data",
         "conv1",
         3,
         16,
         11,
         ("XavierFill", {}),
         ("ConstantFill", {}),
         stride=4,
         pad=0
     )
     relu1 = brew.relu(model, conv1, "relu1")
     norm1 = brew.lrn(model, relu1, "norm1", size=5, alpha=0.0001, beta=0.75)
     pool1 = brew.max_pool(model, norm1, "pool1", kernel=3, stride=2)
     # Stage 2: grouped conv (2 groups), 16->32 channels, 5x5 kernel.
     conv2 = brew.group_conv(
         model,
         pool1,
         "conv2",
         16,
         32,
         5,
         ("XavierFill", {}),
         ("ConstantFill", {"value": 0.1}),
         group=2,
         stride=1,
         pad=2
     )
     relu2 = brew.relu(model, conv2, "relu2")
     norm2 = brew.lrn(model, relu2, "norm2", size=5, alpha=0.0001, beta=0.75)
     pool2 = brew.max_pool(model, norm2, "pool2", kernel=3, stride=2)
     # Stage 3: plain conv, 32->64 channels, 3x3 kernel.
     conv3 = brew.conv(
         model,
         pool2,
         "conv3",
         32,
         64,
         3,
         ("XavierFill", {'std': 0.01}),
         ("ConstantFill", {}),
         pad=1
     )
     relu3 = brew.relu(model, conv3, "relu3")
     # Stage 4: grouped conv, 64->64 channels.
     conv4 = brew.group_conv(
         model,
         relu3,
         "conv4",
         64,
         64,
         3,
         ("XavierFill", {}),
         ("ConstantFill", {"value": 0.1}),
         group=2,
         pad=1
     )
     relu4 = brew.relu(model, conv4, "relu4")
     # Stage 5: grouped conv, 64->32 channels, then final max-pool.
     conv5 = brew.group_conv(
         model,
         relu4,
         "conv5",
         64,
         32,
         3,
         ("XavierFill", {}),
         ("ConstantFill", {"value": 0.1}),
         group=2,
         pad=1
     )
     relu5 = brew.relu(model, conv5, "relu5")
     pool5 = brew.max_pool(model, relu5, "pool5", kernel=3, stride=2)
     # Classifier head: 1152 -> 1024 -> 1024 -> 5 fully connected layers.
     fc6 = brew.fc(
         model, pool5, "fc6", 1152, 1024, ("XavierFill", {}),
         ("ConstantFill", {"value": 0.1})
     )
     relu6 = brew.relu(model, fc6, "relu6")
     fc7 = brew.fc(
         model, relu6, "fc7", 1024, 1024, ("XavierFill", {}),
         ("ConstantFill", {"value": 0.1})
     )
     relu7 = brew.relu(model, fc7, "relu7")
     fc8 = brew.fc(
         model, relu7, "fc8", 1024, 5, ("XavierFill", {}),
         ("ConstantFill", {"value": 0.0})
     )
     # Loss: softmax over 5 classes, cross-entropy vs the "label" blob,
     # averaged into a scalar loss, then backward ops are generated.
     pred = brew.softmax(model, fc8, "pred")
     xent = model.LabelCrossEntropy([pred, "label"], "xent")
     loss = model.AveragedLoss([xent], ["loss"])
     model.AddGradientOperators([loss])
     return model