def __call__(self, x, rois, train=False):
    """Run the VGG16 Fast R-CNN head.

    Args:
        x: input image batch variable.
        rois: regions of interest fed to RoI pooling.
        train: enables dropout in the fully-connected layers.

    Returns:
        (cls_score, bbox_pred): softmax class scores and bbox regressions.
    """
    # VGG16 conv stages; 2x2 max-pool after every stage except the last.
    stages = (
        ('conv1_1', 'conv1_2'),
        ('conv2_1', 'conv2_2'),
        ('conv3_1', 'conv3_2', 'conv3_3'),
        ('conv4_1', 'conv4_2', 'conv4_3'),
        ('conv5_1', 'conv5_2', 'conv5_3'),
    )
    h = x
    for i, stage in enumerate(stages):
        for name in stage:
            h = F.relu(getattr(self, name)(h))
        if i < len(stages) - 1:
            h = F.max_pooling_2d(h, 2, stride=2)
    # 7x7 RoI pooling at 1/16 scale (four stride-2 pools => 0.0625).
    h = roi_pooling_2d(h, rois, 7, 7, 0.0625)
    # Per-RoI classifier with dropout active only during training.
    h = F.dropout(F.relu(self.fc6(h)), train=train, ratio=0.5)
    h = F.dropout(F.relu(self.fc7(h)), train=train, ratio=0.5)
    cls_score = F.softmax(self.cls_score(h))
    bbox_pred = self.bbox_pred(h)
    return cls_score, bbox_pred
def __call__(self, x, rois, train=False):
    """Forward pass of the VGG16-based Fast R-CNN network.

    Args:
        x: image feature input variable.
        rois: RoI proposals for the pooling layer.
        train: if True, dropout is applied after fc6/fc7.

    Returns:
        Tuple of (class probabilities, bounding-box deltas).
    """
    # Stage 1
    feat = F.relu(self.conv1_1(x))
    feat = F.relu(self.conv1_2(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    # Stage 2
    feat = F.relu(self.conv2_1(feat))
    feat = F.relu(self.conv2_2(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    # Stage 3
    feat = F.relu(self.conv3_1(feat))
    feat = F.relu(self.conv3_2(feat))
    feat = F.relu(self.conv3_3(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    # Stage 4
    feat = F.relu(self.conv4_1(feat))
    feat = F.relu(self.conv4_2(feat))
    feat = F.relu(self.conv4_3(feat))
    feat = F.max_pooling_2d(feat, 2, stride=2)
    # Stage 5 (no pooling afterwards; RoI pooling consumes these maps)
    feat = F.relu(self.conv5_1(feat))
    feat = F.relu(self.conv5_2(feat))
    feat = F.relu(self.conv5_3(feat))
    # 7x7 output bins at spatial scale 1/16.
    feat = roi_pooling_2d(feat, rois, 7, 7, 0.0625)
    feat = F.dropout(F.relu(self.fc6(feat)), train=train, ratio=0.5)
    feat = F.dropout(F.relu(self.fc7(feat)), train=train, ratio=0.5)
    cls_score = F.softmax(self.cls_score(feat))
    bbox_pred = self.bbox_pred(feat)
    return cls_score, bbox_pred
def forward(self, x_data, rois, train=True):
    """Forward the VGG16 Fast R-CNN on raw arrays.

    Unlike ``__call__`` variants elsewhere in this file, this entry point
    wraps the raw arrays in ``Variable`` itself.

    Args:
        x_data: raw input image array (ndarray).
        rois: raw RoI array (ndarray).
        train: if True, variables are non-volatile and dropout is active.

    Returns:
        (cls_score, bbox_pred): softmax class scores and bbox regressions.
    """
    x = Variable(x_data, volatile=not train)
    rois = Variable(rois, volatile=not train)
    h = F.relu(self.conv1_1(x))
    h = F.relu(self.conv1_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv2_1(h))
    h = F.relu(self.conv2_2(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv3_1(h))
    h = F.relu(self.conv3_2(h))
    h = F.relu(self.conv3_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv4_1(h))
    h = F.relu(self.conv4_2(h))
    h = F.relu(self.conv4_3(h))
    h = F.max_pooling_2d(h, 2, stride=2)
    h = F.relu(self.conv5_1(h))
    h = F.relu(self.conv5_2(h))
    h = F.relu(self.conv5_3(h))
    # Fix: pass output size and spatial scale explicitly, matching the
    # other VGG16 heads in this file (7x7 bins, 1/16 = 0.0625 scale from
    # the four stride-2 pools). The original call omitted them, which is
    # a TypeError if roi_pooling_2d has no defaults.
    h = roi_pooling_2d(h, rois, 7, 7, 0.0625)
    h = F.dropout(F.relu(self.fc6(h)), train=train, ratio=0.5)
    h = F.dropout(F.relu(self.fc7(h)), train=train, ratio=0.5)
    cls_score = F.softmax(self.cls_score(h))
    bbox_pred = self.bbox_pred(h)
    return cls_score, bbox_pred
def __call__(self, x, rois, t=None, train=False):
    """AlexNet/CaffeNet-style Fast R-CNN forward pass.

    Args:
        x: input image batch variable.
        rois: regions of interest for RoI pooling.
        t: optional ``(t_cls, t_bbox)`` target pair; when given, the loss
           is computed and returned instead of predictions.
        train: enables dropout; must be False for inference (t is None)
           and True when targets are supplied (enforced by asserts).

    Returns:
        ``(cls_score, bbox_pred)`` when ``t is None``, otherwise the
        combined classification + bbox-regression loss.
    """
    # Conv trunk: conv1/conv2 each followed by LRN and 3x3/stride-2 pooling.
    h = self.conv1(x)
    h = F.relu(h)
    h = F.local_response_normalization(h, n=5, k=2, alpha=5e-4, beta=.75)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv2(h)
    h = F.relu(h)
    h = F.local_response_normalization(h, n=5, k=2, alpha=5e-4, beta=.75)
    h = F.max_pooling_2d(h, ksize=3, stride=2)
    h = self.conv3(h)
    h = F.relu(h)
    h = self.conv4(h)
    h = F.relu(h)
    h = self.conv5(h)
    h = F.relu(h)
    # 6x6 RoI bins at spatial scale 0.0625 (1/16).
    h = roi_pooling_2d(h, rois, 6, 6, spatial_scale=0.0625)
    h = self.fc6(h)
    h = F.relu(h)
    h = F.dropout(h, train=train, ratio=.5)
    h = self.fc7(h)
    h = F.relu(h)
    h = F.dropout(h, train=train, ratio=.5)
    # Keep pre-softmax scores for the cross-entropy loss below.
    h_cls_score = self.cls_score(h)
    cls_score = F.softmax(h_cls_score)
    bbox_pred = self.bbox_pred(h)
    if t is None:
        # Inference path: dropout must be off.
        assert train is False
        return cls_score, bbox_pred
    assert train
    t_cls, t_bbox = t
    # Losses are stored on self so callers can inspect them separately.
    self.cls_loss = F.softmax_cross_entropy(h_cls_score, t_cls)
    self.bbox_loss = F.smooth_l1_loss(bbox_pred, t_bbox)
    xp = cuda.get_array_module(x.data)
    # 0.5 weight on the bbox loss, zeroed for background RoIs
    # (t_cls == self.bg_label).
    lambda_ = (0.5 * (t_cls.data != self.bg_label)).astype(xp.float32)
    lambda_ = Variable(lambda_, volatile=not train)
    # NOTE(review): F.smooth_l1_loss normally reduces to a scalar; summing
    # it weighted by a per-RoI lambda_ effectively multiplies the scalar by
    # sum(lambda_) — confirm this weighting is intended.
    L = self.cls_loss + F.sum(lambda_ * self.bbox_loss)
    return L
def __call__(self, x, rois, t=None, train=False):
    """AlexNet-style Fast R-CNN: predictions, or loss when targets given.

    Args:
        x: input image batch variable.
        rois: RoI proposals for the pooling layer.
        t: optional ``(class_targets, bbox_targets)`` pair.
        train: dropout switch; asserted consistent with whether ``t`` is
            supplied.

    Returns:
        ``(cls_score, bbox_pred)`` for inference, or the total loss.
    """
    # conv1 block: relu -> LRN -> 3x3 stride-2 max pool
    net = F.max_pooling_2d(
        F.local_response_normalization(
            F.relu(self.conv1(x)), n=5, k=2, alpha=5e-4, beta=.75),
        ksize=3, stride=2)
    # conv2 block: same post-processing as conv1
    net = F.max_pooling_2d(
        F.local_response_normalization(
            F.relu(self.conv2(net)), n=5, k=2, alpha=5e-4, beta=.75),
        ksize=3, stride=2)
    # conv3-5: plain relu stack
    net = F.relu(self.conv3(net))
    net = F.relu(self.conv4(net))
    net = F.relu(self.conv5(net))
    # 6x6 RoI bins at 1/16 spatial scale
    net = roi_pooling_2d(net, rois, 6, 6, spatial_scale=0.0625)
    # Fully-connected classifier with dropout
    net = F.dropout(F.relu(self.fc6(net)), train=train, ratio=.5)
    net = F.dropout(F.relu(self.fc7(net)), train=train, ratio=.5)
    raw_scores = self.cls_score(net)
    cls_score = F.softmax(raw_scores)
    bbox_pred = self.bbox_pred(net)
    if t is None:
        assert train is False
        return cls_score, bbox_pred
    assert train
    t_cls, t_bbox = t
    # Expose the two loss terms on self for external inspection.
    self.cls_loss = F.softmax_cross_entropy(raw_scores, t_cls)
    self.bbox_loss = F.smooth_l1_loss(bbox_pred, t_bbox)
    xp = cuda.get_array_module(x.data)
    # 0.5 weight on bbox loss, masked out for background RoIs.
    weight = (0.5 * (t_cls.data != self.bg_label)).astype(xp.float32)
    weight = Variable(weight, volatile=not train)
    total = self.cls_loss + F.sum(weight * self.bbox_loss)
    return total
def __call__(self, x, rois, train=False):
    """CaffeNet-style Fast R-CNN forward pass.

    Fix: the original signature was ``(self, x, rois)`` but the body used
    ``train`` in both dropout calls, raising ``NameError`` at runtime.
    ``train=False`` is added as a backward-compatible keyword (existing
    two-argument callers keep working, with dropout off).

    Args:
        x: input image batch variable.
        rois: regions of interest for RoI pooling.
        train: enables dropout after fc6/fc7.

    Returns:
        (cls_score, bbox_pred): softmax class scores and bbox regressions.
    """
    # conv1/conv2 blocks: relu -> padded 3x3/stride-2 pool -> LRN.
    h = F.relu(self.conv1(x))
    h = F.max_pooling_2d(h, 3, stride=2, pad=1)
    h = F.local_response_normalization(h, n=5, alpha=1e-4, beta=.75)
    h = F.relu(self.conv2(h))
    h = F.max_pooling_2d(h, 3, stride=2, pad=1)
    h = F.local_response_normalization(h, n=5, alpha=1e-4, beta=.75)
    h = F.relu(self.conv3(h))
    h = F.relu(self.conv4(h))
    h = F.relu(self.conv5(h))
    # 6x6 RoI bins at spatial scale 0.0625 (1/16).
    h = roi_pooling_2d(h, rois, 6, 6, 0.0625)
    h = F.dropout(F.relu(self.fc6(h)), train=train, ratio=0.5)
    h = F.dropout(F.relu(self.fc7(h)), train=train, ratio=0.5)
    cls_score = F.softmax(self.cls_score(h))
    bbox_pred = self.bbox_pred(h)
    return cls_score, bbox_pred