def __call__(self, x, train=True):
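        # VGG-style stack: conv+ReLU blocks, each followed by 2x2 max pooling,
        # then two dropout-regularized fully connected layers and a sigmoid
        # output (suggesting per-unit binary targets rather than a softmax).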
        h1 = F.relu(self.conv1_1(x))
        h2 = F.max_pooling_2d(h1, 2, stride=2)

        h3 = F.relu(self.conv2_1(h2))
        h4 = F.max_pooling_2d(h3, 2, stride=2)

        h5 = F.relu(self.conv3_1(h4))
        h6 = F.relu(self.conv3_2(h5))
        h7 = F.max_pooling_2d(h6, 2, stride=2)

        h8 = F.relu(self.conv4_1(h7))
        h9 = F.relu(self.conv4_2(h8))
        h10 = F.max_pooling_2d(h9, 2, stride=2)

        h11 = F.relu(self.conv5_1(h10))
        h12 = F.relu(self.conv5_2(h11))
        h13 = F.max_pooling_2d(h12, 2, stride=2)

        h14 = F.dropout(F.relu(self.fc6(h13)), train=train, ratio=0.5)
        h15 = F.dropout(F.relu(self.fc7(h14)), train=train, ratio=0.5)
        h16 = self.fc8(h15)
        y = F.sigmoid(h16)

        return y
Example #2
 def __call__(self, x):
     """Compute actions for given observations."""
     h = F.relu(self.l0(x))
     h = F.relu(self.l1(h))
     return squash(self.l2(h),
                   self.xp.asarray(self.action_low),
                   self.xp.asarray(self.action_high))
Example #3
    def __call__(self, x):
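        # Residual block: a three-conv main branch (h1) and a projection
        # shortcut (h2) computed from the input, summed and passed through ReLU.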
        h1 = F.relu(self.bn1(self.conv1(x)))
        h1 = F.relu(self.bn2(self.conv2(h1)))
        h1 = self.bn3(self.conv3(h1))
        h2 = self.bn4(self.conv4(x))

        return F.relu(h1 + h2)
Example #4

 def call_inception(self, x, name):
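     # GoogLeNet-style Inception module: parallel 1x1, 3x3, and 5x5 branches
     # (the larger kernels sit behind 1x1 "reduce" convolutions) plus a pooled
     # 1x1 projection, concatenated along the channel axis.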
     out1 = self[name + '/1x1'](x)
     out3 = self[name + '/3x3'](F.relu(self[name + '/3x3_reduce'](x)))
     out5 = self[name + '/5x5'](F.relu(self[name + '/5x5_reduce'](x)))
     pool = self[name + '/pool_proj'](F.max_pooling_2d(x, 3, stride=1, pad=1))
     y = F.relu(F.concat((out1, out3, out5, pool), axis=1))
     return y
Example #5
 def __call__(self, z, test=False):
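     # DCGAN-style generator: project the latent vector z to a 512x6x6 feature
     # map, then upsample through dc1..dc4 (presumably deconvolution layers).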
     h = F.reshape(F.relu(self.bn0l(self.l0z(z), test=test)), (z.data.shape[0], 512, 6, 6))
     h = F.relu(self.bn1(self.dc1(h), test=test))
     h = F.relu(self.bn2(self.dc2(h), test=test))
     h = F.relu(self.bn3(self.dc3(h), test=test))
     x = self.dc4(h)
     return x
Example #6
 def __call__(self, x, train=True):
     self.train = train
     h = F.max_pooling_2d(F.relu(self.bn1(self.conv1(x), test=not self.train)), 3, stride=2)
     h = F.max_pooling_2d(F.relu(self.bn2(self.conv2(h), test=not self.train)), 3, stride=2)
     h = F.max_pooling_2d(F.relu(self.bn3(self.conv3(h), test=not self.train)), 3, stride=2)
     h = self.fc1(h)
     return h
Example #7
    def __call__(self, x):
        x.data = x.data / np.linalg.norm(x.data)
        h1 = F.relu(self.l1(x))
        h2 = F.relu(self.l2(h1))
        h3 = F.relu(self.l3(h2))

        return h3
Example #8
def forward(x_data, y_data, train=True):
    """Define the forward computation."""
    # train ... training flag (False disables training-time behavior)
    # dropout is applied during training
    # and disabled at test time

    # input and target data
    # data arrays must be wrapped as chainer.Variable
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    # output of hidden layer 1
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    # output of hidden layer 2
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    # output of the output layer
    y = model.l3(h2)

    # return a different value for training and testing
    # y ... network output (hypothesis)
    # t ... target data
    if train:   # training
        # return the loss; since this is multi-class classification,
        # use softmax cross-entropy as the loss function
        loss = F.softmax_cross_entropy(y, t)
        return loss
    else:   # testing
        # return the accuracy
        acc = F.accuracy(y, t)
        return acc
Example #9

    def __call__(self, x, im_info):
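        # Faster R-CNN forward pass: a Region Proposal Network scores anchors on
        # the trunk features, proposals are RoI-pooled from conv5_3, and the
        # Fast R-CNN head classifies and refines each proposal box.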
        h, n = self.trunk(x), x.data.shape[0]
        rpn_cls_score = self.rpn_cls_score(h)
        c, hh, ww = rpn_cls_score.data.shape[1:]
        rpn_bbox_pred = self.rpn_bbox_pred(h)
        rpn_cls_score = F.reshape(rpn_cls_score, (n, 2, -1))

        # RoI Proposal
        rpn_cls_prob = F.softmax(rpn_cls_score)
        rpn_cls_prob_reshape = F.reshape(rpn_cls_prob, (n, c, hh, ww))
        rois = self.proposal_layer(
            rpn_cls_prob_reshape, rpn_bbox_pred, im_info, self.train)
        boxes = rois[:, 1:5] / im_info[0][2]
        rois = chainer.Variable(rois, volatile=not self.train)

        # RCNN
        pool5 = F.roi_pooling_2d(self.trunk.relu5_3_out, rois, 7, 7, 0.0625)
        fc6 = F.relu(self.fc6(pool5))
        fc7 = F.relu(self.fc7(fc6))
        self.scores = F.softmax(self.cls_score(fc7))

        box_deltas = self.bbox_pred(fc7).data
        pred_boxes = bbox_transform_inv(boxes, box_deltas)
        self.pred_boxes = clip_boxes(pred_boxes, im_info[0][:2])

        if self.train:
            # loss_cls = F.softmax_cross_entropy(cls_score, labels)
            # huber loss with delta=1 means SmoothL1Loss
            return None
        else:
            return self.scores, self.pred_boxes
Example #10
    def extract_features(self, x, train=True):
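        # VGG-like feature extractor; intermediate activations are kept on
        # self.h1..self.h5, presumably for feature matching or visualization.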
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        self.h1 = h
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        self.h2 = h
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv5(h))
        h = F.relu(self.conv6(h))
        self.h3 = h
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv7(h))
        h = F.relu(self.conv8(h))
        self.h4 = h
        h = F.max_pooling_2d(h, 2)

        h = F.relu(self.conv9(h))
        h = F.relu(self.conv10(h))
        self.h5 = h
        h = F.max_pooling_2d(h, 2)
        return h
Example #11
    def __call__(self, x, train):
        h1 = F.relu(self.bn1(self.conv1(x), test=not train))
        h1 = F.relu(self.bn2(self.conv2(h1), test=not train))
        h1 = self.bn3(self.conv3(h1), test=not train)
        h2 = self.bn4(self.conv4(x), test=not train)

        return F.relu(h1 + h2)
Example #12
    def forward_super(self, x, train=True):
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        y = self.conv3(h)

        return y
Example #13
def forward(x, is_train=True):
    h1 = F.max_pooling_2d(F.relu(model.conv1(x)), 3)
    h2 = F.max_pooling_2d(F.relu(model.conv2(h1)), 3)
    h3 = F.dropout(F.relu(model.l1(h2)), train=is_train)
    h4 = F.dropout(F.relu(model.l2(h3)), train=is_train)
    p = model.l3(h4)
    return p
Example #14
 def __call__(self, x, train=True):
     h1 = F.relu(self.conv1(x))
     h2 = F.relu(self.conv2(h1))
     h3 = F.relu(self.conv3(h2))
     h4 = self.lstm(h3)
     q = self.q(h4)
     return q
Example #15
def forward(x_data, y_data, train=True):
    # Neural net architecture
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    h = F.max_pooling_2d(F.dropout(F.relu(model.bn2(model.cv1(x))), train=train), 2)
    h = F.dropout(F.relu(model.ln3(h)), train=train)
    y = model.ln4(h)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #16
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = F.relu(self.bn1_1(self.conv1_1(x)))
        h = F.relu(self.bn1_2(self.conv1_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)

        h = F.relu(self.bn2_1(self.conv2_1(h)))
        h = F.relu(self.bn2_2(self.conv2_2(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)

        h = F.relu(self.bn3_1(self.conv3_1(h)))
        h = F.relu(self.bn3_2(self.conv3_2(h)))
        h = F.relu(self.bn3_3(self.conv3_3(h)))
        h = F.relu(self.bn3_4(self.conv3_4(h)))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)

        h = F.dropout(F.relu(self.fc4(h)), train=train, ratio=0.5)
        h = F.dropout(F.relu(self.fc5(h)), train=train, ratio=0.5)
        h = self.fc6(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
Example #17
def forward(x_data, y_data):
  x = Variable(x_data)
  t = Variable(y_data)
  h1 = F.relu(model.l1(x))
  h2 = F.relu(model.l2(h1))
  y = model.l3(h2)
  return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #18
 def predict(self, test_x: np.ndarray):
     test_x = Variable(test_x)
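     # NOTE: in Chainer v2+, F.dropout follows chainer.config.train, so this
     # should be wrapped in `with chainer.using_config('train', False):` to
     # actually disable dropout at prediction time.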
     self.h1 = F.dropout(F.relu(self.l1(test_x)))
     self.h2 = F.dropout(F.relu(self.l2(self.h1)))
     y = self.l3(self.h2)
     predict_list = list(map(np.argmax, F.softmax(y).data))
     return predict_list
Example #19
	def forward(self, x_data, y_data, train=True, gpu=-1):
		x, t = Variable(x_data), Variable(y_data)
		h = F.max_pooling_2d(F.relu(self.conv1(x)), ksize=2, stride=2)
		h = F.max_pooling_2d(F.relu(self.conv2(h)), ksize=3, stride=3)
		h = F.dropout(F.relu(self.l3(h)), train=train)
		y = self.l4(h)
		return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #20

def forward(x_data, y_data, print_conf_matrix=False):
    '''
    Neural net architecture
    :param x_data:
    :param y_data:
    :param print_conf_matrix: if True, print a confusion matrix
    :return:
    '''
    x, t = Variable(x_data), Variable(y_data)

    h1 = F.relu(model.l1(x))
    h1 = F.max_pooling_2d(h1, max_pool_window_1, stride=max_pool_stride_1)

    h2 = F.dropout(F.relu(model.l2(h1)))
    h2 = F.average_pooling_2d(h2, avg_pool_window_2, stride=avg_pool_stride_2)
    h2 = F.max_pooling_2d(h2, max_pool_window_2, stride=max_pool_stride_2)

    y = model.l3(h2)

    # display confusion matrix
    if print_conf_matrix:
        pdb.set_trace()
        print(confusion_matrix(cuda.to_cpu(t.data), cuda.to_cpu(y.data).argmax(axis=1)))

    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #21

 def Q_func_target(self, state):
     h1 = F.relu(self.CNN_model_target.l1(state / 254.0))  # scale inputs into [0.0, 1.0]
     h2 = F.relu(self.CNN_model_target.l2(h1))
     h3 = F.relu(self.CNN_model_target.l3(h2))
     h4 = F.relu(self.model_target.l4(h3))  # use the target network's weights for l4 as well
     Q = self.model_target.q_value(h4)
     return Q
Example #22
    def __call__(self, x, rois, roi_indices):
        """Forward the chain.

        We assume that there are :math:`N` batches.

        Args:
            x (~chainer.Variable): 4D image variable.
            rois (array): A bounding box array containing coordinates of
                proposal boxes.  This is a concatenation of bounding box
                arrays from multiple images in the batch.
                Its shape is :math:`(R', 4)`. Given :math:`R_i` proposed
                RoIs from the :math:`i` th image,
                :math:`R' = \\sum _{i=1} ^ N R_i`.
            roi_indices (array): An array containing indices of images to
                which bounding boxes correspond to. Its shape is :math:`(R',)`.

        """
        roi_indices = roi_indices.astype(np.float32)
        indices_and_rois = self.xp.concatenate(
            (roi_indices[:, None], rois), axis=1)
        pool = _roi_pooling_2d_yx(
            x, indices_and_rois, self.roi_size, self.roi_size,
            self.spatial_scale)

        fc6 = F.relu(self.fc6(pool))
        fc7 = F.relu(self.fc7(fc6))
        roi_cls_locs = self.cls_loc(fc7)
        roi_scores = self.score(fc7)
        return roi_cls_locs, roi_scores
Example #23
 def compute_features(self, obs):
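     # Cast observations to float32 and transpose NHWC -> NCHW, the layout
     # Chainer's convolution links expect.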
     obs = F.cast(obs, np.float32)
     obs = F.transpose(obs, (0, 3, 1, 2))
     h1 = F.relu(self.conv1(obs))
     h2 = F.relu(self.conv2(h1))
     h3 = F.relu(self.fc(h2))
     return h3
Example #24
    def forward(self, x_data, y_data, train=True, models=None):
        VGG_mini = models["VGG_mini"]
        VGG_mini2 = models["VGG_mini2"]
        VGG_mini3 = models["VGG_mini3"]
        
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = F.relu(self.conv1_1(x))
        h = F.relu(self.conv1_2(h))
        h = F.relu(self.conv1_3(h))
        h = F.relu(self.conv1_4(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        
        h = F.relu(self.conv1_5(h))
        h = F.max_pooling_2d(h, 2, stride=2)
        h = F.dropout(h, ratio=0.25, train=train)
        
        h = self.fc(h)

        if train:
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
        else:
            # return F.softmax_cross_entropy(h, t), F.accuracy(h, t), h
            return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Example #25
 def __call__(self, x):
     h = F.relu(self.conv1(x))
     h = F.relu(self.conv2(h))
     h = F.relu(self.conv3(h))
     h = F.relu(self.fc4(h))
     h = self.fc5(h)
     return h
Example #26

 def __call__(self, state):
     h1 = F.relu(self.conv1(state))
     h2 = F.relu(self.l1(h1))
     h3 = F.relu(self.l2(h2))
     h4 = F.relu(self.l3(h3))
     h5 = self.l4(h4)
     return h5
Example #27

    def predict(self, x):
        h1 = F.dropout(F.relu(self.l1(x)), ratio=self.dropout1, train=False)
        h2 = F.dropout(F.relu(self.l2(h1)), ratio=self.dropout2, train=False)
        h3 = F.dropout(F.relu(self.l3(h2)), ratio=self.dropout3, train=False)
        h4 = self.l4(h3)

        return h4
Example #28
    def __call__(self, x, t):
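        # VGG-style conv blocks with batch normalization; `test=not self.train`
        # toggles BN between batch statistics and the moving averages in the
        # pre-v2 Chainer API.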
        h = F.relu(self.bn1_1(self.conv1_1(x), test=not self.train))
        h = F.relu(self.bn1_2(self.conv1_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn2_1(self.conv2_1(h), test=not self.train))
        h = F.relu(self.bn2_2(self.conv2_2(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.relu(self.bn3_1(self.conv3_1(h), test=not self.train))
        h = F.relu(self.bn3_2(self.conv3_2(h), test=not self.train))
        h = F.relu(self.bn3_3(self.conv3_3(h), test=not self.train))
        h = F.relu(self.bn3_4(self.conv3_4(h), test=not self.train))
        h = F.max_pooling_2d(h, 2, 2)
        h = F.dropout(h, ratio=0.25, train=self.train)

        h = F.dropout(F.relu(self.fc4(h)), ratio=0.5, train=self.train)
        h = F.dropout(F.relu(self.fc5(h)), ratio=0.5, train=self.train)
        h = self.fc6(h)

        self.pred = F.softmax(h)
        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(self.pred, t)

        if self.train:
            return self.loss
        else:
            return self.pred
Example #29
    def __call__(self, x, y, state, train=True, target=True):
        h = Variable(x.reshape(len(x), 1, 4, 3), volatile=not train)
        t = Variable(y.flatten(), volatile=not train)

        h0 = F.max_pooling_2d(F.relu(self.conv1(h)), 2)
        h0 = F.max_pooling_2d(F.relu(self.conv2(h0)), 2)

        if not target:
            self.data_first.append(h0.data)

        h1 = F.dropout(F.relu(self.l1(h0)), ratio=0.5, train=train)

        if not target:
            self.data_hidden.append(h1.data)

        y = self.l2(h1)

        if not target:
            self.data_output.append(y.data)

        self.loss = F.softmax_cross_entropy(y, t)

        return state, self.loss
Example #30
def forward(x_data, y_data, train=True):
    # Neural net architecture
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)), train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    y = model.l3(h2)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #31
    def forward(self, x):
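        # Two-branch model: CNN features from the raw signal are concatenated
        # with relative band-power features (delta..low-gamma) computed via FFT
        # before the final fully connected classifier.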
        h = F.relu(self.conv1(x))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(h, (1, 2), stride=(1, 2))
        h = F.reshape(h, (h.data.shape[0], h.data.shape[2], h.data.shape[1],
                          h.data.shape[3]))

        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(h, (5, 3))
        h = F.relu(self.conv5(h))
        h = F.relu(self.conv6(h))
        h = F.max_pooling_2d(h, (1, 2))
        h = F.relu(self.conv7(h))
        h = F.relu(self.conv8(h))
        h = F.max_pooling_2d(h, (1, 2))
        h = F.relu(self.conv9(h))
        h = F.relu(self.conv10(h))
        h = F.max_pooling_2d(h, (1, 2))

        # Get the power spectrum by FFT. data_shape = (6,)
        # frequency by 1.25Hz
        # sampling_rate=1000Hz
        # freq_names = {'delta','theta','lalpha','halpha','beta','lgamma'};
        # freq_bands = [1 4; 4 8; 8 10; 10 13; 13 30; 30 50];
        # delta:1.25-3.75:
        # theta: 5-7.5:
        # lalpha:8.75-10:
        # halpha:11.25-12.5:
        # beta:13.75-30:
        # lgamma:31.25-50:
        tmp = cupy.abs(cupy.fft.fft(x))

        delta = cupy.average(tmp[:, :, :, 1:4], axis=3)
        theta = cupy.average(tmp[:, :, :, 4:7], axis=3)
        lalpha = cupy.average(tmp[:, :, :, 7:9], axis=3)
        halpha = cupy.average(tmp[:, :, :, 9:11], axis=3)
        beta = cupy.average(tmp[:, :, :, 11:25], axis=3)
        lgamma = cupy.average(tmp[:, :, :, 25:41], axis=3)
        Sum = delta + theta + lalpha + halpha + beta + lgamma

        power_spectral = cupy.zeros((x.shape[0], x.shape[1], x.shape[2], 6))
        power_spectral[:, :, :, 0] = cupy.divide(delta, Sum)
        power_spectral[:, :, :, 1] = cupy.divide(theta, Sum)
        power_spectral[:, :, :, 2] = cupy.divide(lalpha, Sum)
        power_spectral[:, :, :, 3] = cupy.divide(halpha, Sum)
        power_spectral[:, :, :, 4] = cupy.divide(beta, Sum)
        power_spectral[:, :, :, 5] = cupy.divide(lgamma, Sum)
        power_spectral = chainer.Variable(power_spectral)
        power_spectral = F.cast(power_spectral, cupy.float32)

        h = F.reshape(h, (h.shape[0], h.shape[1] * h.shape[2] * h.shape[3]))
        power_spectral = F.reshape(
            power_spectral,
            (power_spectral.shape[0], power_spectral.shape[1] *
             power_spectral.shape[2] * power_spectral.shape[3]))

        h = F.relu(self.norm1(self.fc11(h)))
        h = F.dropout(h)
        h = F.relu(self.norm2(self.fc12(h)))
        h = F.dropout(h)
        # Concatenate the features extracted by the deep network and the relative power spectrum
        h = F.concat((h, power_spectral), axis=1)
        h = self.fc13(h)

        if chainer.config.train:
            return h
        return F.softmax(h)
Example #32
File: model.py  Project: mikittt/portrait
 def __call__(self, x, test=False):
     h = F.relu(self.b1(self.c1(x), test=test))
     h = F.relu(self.b2(self.c2(h), test=test))
     h = F.relu(self.b3(self.c3(h), test=test))
     
     return h
Example #33
    def predict_depth(self, rgb, mask_score, depth_viz):
        # concatenate rgb and depth_viz
        concat_input = F.concat((rgb, depth_viz), axis=1)

        # conv_depth_1
        h = F.relu(self.conv_depth_1_1(concat_input))
        h = F.relu(self.conv_depth_1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        depth_pool1 = h  # 1/2

        # conv_depth_2
        h = F.relu(self.conv_depth_2_1(depth_pool1))
        h = F.relu(self.conv_depth_2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        depth_pool2 = h  # 1/4

        # conv_depth_3
        h = F.relu(self.conv_depth_3_1(depth_pool2))
        h = F.relu(self.conv_depth_3_2(h))
        h = F.relu(self.conv_depth_3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        depth_pool3 = h  # 1/8

        # conv_depth_4
        h = F.relu(self.conv_depth_4_1(depth_pool3))
        h = F.relu(self.conv_depth_4_2(h))
        h = F.relu(self.conv_depth_4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        depth_pool4 = h  # 1/16

        # conv_depth_5
        h = F.relu(self.conv_depth_5_1(depth_pool4))
        h = F.relu(self.conv_depth_5_2(h))
        h = F.relu(self.conv_depth_5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        depth_pool5 = h  # 1/32

        if self.masking:
            # (N, C, H, W) -> (N, H, W)
            mask_pred_tmp = F.argmax(self.score_label, axis=1)
            # (N, H, W) -> (N, 1, H, W), float required for resizing
            mask_pred_tmp = mask_pred_tmp[:, None, :, :].data.astype(
                self.xp.float32)  # 1/1
            resized_mask_pred = F.resize_images(
                mask_pred_tmp,
                (depth_pool5.shape[2], depth_pool5.shape[3]))  # 1/32
            depth_pool5_cp = depth_pool5
            masked_depth_pool5 = depth_pool5_cp * \
                (resized_mask_pred.data == 0.0).astype(self.xp.float32)
        else:
            masked_depth_pool5 = depth_pool5

        # depth_fc6
        h = F.relu(self.depth_fc6(masked_depth_pool5))
        h = F.dropout(h, ratio=.5)
        depth_fc6 = h  # 1/32

        # depth_fc7
        h = F.relu(self.depth_fc7(depth_fc6))
        h = F.dropout(h, ratio=.5)
        depth_fc7 = h  # 1/32

        # depth_score_fr
        h = self.depth_score_fr(depth_fc7)
        depth_score_fr = h  # 1/32

        # depth_score_pool3
        scale_depth_pool3 = 0.0001 * depth_pool3
        h = self.depth_score_pool3(scale_depth_pool3)
        depth_score_pool3 = h  # 1/8

        # depth_score_pool4
        scale_depth_pool4 = 0.01 * depth_pool4
        h = self.depth_score_pool4(scale_depth_pool4)
        depth_score_pool4 = h  # 1/16

        # depth upscore2
        h = self.depth_upscore2(depth_score_fr)
        depth_upscore2 = h  # 1/16

        # depth_score_pool4c
        h = depth_score_pool4[:, :, 5:5 + depth_upscore2.data.shape[2],
                              5:5 + depth_upscore2.data.shape[3]]
        depth_score_pool4c = h  # 1/16

        # depth_fuse_pool4
        h = depth_upscore2 + depth_score_pool4c
        depth_fuse_pool4 = h  # 1/16

        # depth_upscore_pool4
        h = self.depth_upscore_pool4(depth_fuse_pool4)
        depth_upscore_pool4 = h  # 1/8

        # depth_score_pool3c
        h = depth_score_pool3[:, :, 9:9 + depth_upscore_pool4.data.shape[2],
                              9:9 + depth_upscore_pool4.data.shape[3]]
        depth_score_pool3c = h  # 1/8

        # depth_fuse_pool3
        h = depth_upscore_pool4 + depth_score_pool3c
        depth_fuse_pool3 = h  # 1/8

        # depth_upscore8
        h = self.depth_upscore8(depth_fuse_pool3)
        depth_upscore8 = h  # 1/1

        # depth_score
        h = depth_upscore8[:, :, 31:31 + rgb.shape[2], 31:31 + rgb.shape[3]]
        depth_score = h  # 1/1

        # (-inf, inf) -> (0, 1) -> (min_depth, max_depth)
        h = F.sigmoid(depth_score)
        h = h * (self.max_depth - self.min_depth) + self.min_depth
        depth_pred = h

        return depth_pred
Example #34
    def predict_mask(self, rgb, return_pool5=False):
        # conv_rgb_1
        h = F.relu(self.conv_rgb_1_1(rgb))
        h = F.relu(self.conv_rgb_1_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        rgb_pool1 = h  # 1/2

        # conv_rgb_2
        h = F.relu(self.conv_rgb_2_1(rgb_pool1))
        h = F.relu(self.conv_rgb_2_2(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        rgb_pool2 = h  # 1/4

        # conv_rgb_3
        h = F.relu(self.conv_rgb_3_1(rgb_pool2))
        h = F.relu(self.conv_rgb_3_2(h))
        h = F.relu(self.conv_rgb_3_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        rgb_pool3 = h  # 1/8

        # conv_rgb_4
        h = F.relu(self.conv_rgb_4_1(rgb_pool3))
        h = F.relu(self.conv_rgb_4_2(h))
        h = F.relu(self.conv_rgb_4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        rgb_pool4 = h  # 1/16

        # conv_rgb_5
        h = F.relu(self.conv_rgb_5_1(rgb_pool4))
        h = F.relu(self.conv_rgb_5_2(h))
        h = F.relu(self.conv_rgb_5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        rgb_pool5 = h  # 1/32

        # rgb_fc6
        h = F.relu(self.rgb_fc6(rgb_pool5))
        h = F.dropout(h, ratio=.5)
        rgb_fc6 = h  # 1/32

        # rgb_fc7
        h = F.relu(self.rgb_fc7(rgb_fc6))
        h = F.dropout(h, ratio=.5)
        rgb_fc7 = h  # 1/32

        # mask_score_fr
        h = self.mask_score_fr(rgb_fc7)
        mask_score_fr = h  # 1/32

        # mask_score_pool3
        scale_rgb_pool3 = 0.0001 * rgb_pool3
        h = self.mask_score_pool3(scale_rgb_pool3)
        mask_score_pool3 = h  # 1/8

        # mask_score_pool4
        scale_rgb_pool4 = 0.01 * rgb_pool4
        h = self.mask_score_pool4(scale_rgb_pool4)
        mask_score_pool4 = h  # 1/16

        # mask upscore2
        h = self.mask_upscore2(mask_score_fr)
        mask_upscore2 = h  # 1/16

        # mask_score_pool4c
        h = mask_score_pool4[:, :, 5:5 + mask_upscore2.data.shape[2],
                             5:5 + mask_upscore2.data.shape[3]]
        mask_score_pool4c = h  # 1/16

        # mask_fuse_pool4
        h = mask_upscore2 + mask_score_pool4c
        mask_fuse_pool4 = h  # 1/16

        # mask_upscore_pool4
        h = self.mask_upscore_pool4(mask_fuse_pool4)
        mask_upscore_pool4 = h  # 1/8

        # mask_score_pool3c
        h = mask_score_pool3[:, :, 9:9 + mask_upscore_pool4.data.shape[2],
                             9:9 + mask_upscore_pool4.data.shape[3]]
        mask_score_pool3c = h  # 1/8

        # mask_fuse_pool3
        h = mask_upscore_pool4 + mask_score_pool3c
        mask_fuse_pool3 = h  # 1/8

        # mask_upscore8
        h = self.mask_upscore8(mask_fuse_pool3)
        mask_upscore8 = h  # 1/1

        # mask_score
        h = mask_upscore8[:, :, 31:31 + rgb.shape[2], 31:31 + rgb.shape[3]]
        mask_score = h  # 1/1

        if return_pool5:
            return mask_score, rgb_pool5
        else:
            return mask_score
Example #35
 def __call__(self, x):
     h1 = F.relu(self.l1(x))
     h2 = F.relu(self.l2(h1))
     return self.l3(h2)
Example #36
 def __call__(self, x):
     # Using max_pooling -> ave_pooling
     # 1 Layer
     h = F.relu(self.conv1_1(x))
     h1 = F.relu(self.conv1_2(h))
     # 2 Layer
     # h  = F.max_pooling_2d(h1, ksize=2)
     h = F.average_pooling_2d(h1, ksize=2)
     h = F.relu(self.conv2_1(h))
     h2 = F.relu(self.conv2_2(h))
     # 3 Layer
     # h  = F.max_pooling_2d(h2, ksize=2)
     h = F.average_pooling_2d(h2, ksize=2)
     h = F.relu(self.conv3_1(h))
     h = F.relu(self.conv3_2(h))
     h3 = F.relu(self.conv3_3(h))
     # 4 Layer
     # h  = F.max_pooling_2d(h3, ksize=2)
     h = F.average_pooling_2d(h3, ksize=2)
     h = F.relu(self.conv4_1(h))
     h = F.relu(self.conv4_2(h))
     h4 = F.relu(self.conv4_3(h))
     # 5 Layer
     # h = F.max_pooling_2d(h4, ksize=2)
     h = F.average_pooling_2d(h4, ksize=2)
     h = F.relu(self.conv5_1(h))
     h = F.relu(self.conv5_2(h))
     h5 = F.relu(self.conv5_3(h))
     return h5
Example #37
 def __call__(self, x):
     h = F.relu(self.conv1(x))
     h = F.relu(self.conv2(h))
     h = F.relu(self.conv3(h))
     return h
Example #38
 def __call__(self, x):
     # Write the forward computation applied to the input data
     h1 = F.relu(self.l1(x))
     h2 = F.relu(self.l2(h1))
     return self.l3(h2)
Example #39
    def __call__(self, x, t=None):
        self.x = x
        self.t = t

        # conv1
        h = F.relu(self.conv1_1(x))
        conv1_1 = h
        h = F.relu(self.conv1_2(conv1_1))
        conv1_2 = h
        h = F.max_pooling_2d(conv1_2, 2, stride=2, pad=0)
        pool1 = h  # 1/2

        # conv2
        h = F.relu(self.conv2_1(pool1))
        conv2_1 = h
        h = F.relu(self.conv2_2(conv2_1))
        conv2_2 = h
        h = F.max_pooling_2d(conv2_2, 2, stride=2, pad=0)
        pool2 = h  # 1/4

        # conv3
        h = F.relu(self.conv3_1(pool2))
        conv3_1 = h
        h = F.relu(self.conv3_2(conv3_1))
        conv3_2 = h
        h = F.relu(self.conv3_3(conv3_2))
        conv3_3 = h
        h = F.max_pooling_2d(conv3_3, 2, stride=2, pad=0)
        pool3 = h  # 1/8

        # conv4
        h = F.relu(self.conv4_1(pool3))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool4 = h  # 1/16

        # conv5
        h = F.relu(self.conv5_1(pool4))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool5 = h  # 1/32

        # fc6
        h = F.relu(self.fc6(pool5))
        h = F.dropout(h, ratio=.5)
        fc6 = h  # 1/32

        # fc7
        h = F.relu(self.fc7(fc6))
        h = F.dropout(h, ratio=.5)
        fc7 = h  # 1/32

        # score_fr
        h = self.score_fr(fc7)
        score_fr = h  # 1/32

        # score_pool3
        h = self.score_pool3(pool3)
        score_pool3 = h  # 1/8

        # score_pool4
        h = self.score_pool4(pool4)
        score_pool4 = h  # 1/16

        # upscore2
        h = self.upscore2(score_fr)
        upscore2 = h  # 1/16

        # score_pool4c
        h = score_pool4[:, :, 5:5 + upscore2.data.shape[2],
                        5:5 + upscore2.data.shape[3]]
        score_pool4c = h  # 1/16

        # fuse_pool4
        h = upscore2 + score_pool4c
        fuse_pool4 = h  # 1/16

        # upscore_pool4
        h = self.upscore_pool4(fuse_pool4)
        upscore_pool4 = h  # 1/8

        # score_pool3c
        h = score_pool3[:, :, 9:9 + upscore_pool4.data.shape[2],
                        9:9 + upscore_pool4.data.shape[3]]
        score_pool3c = h  # 1/8

        # fuse_pool3
        h = upscore_pool4 + score_pool3c
        fuse_pool3 = h  # 1/8

        # upscore8
        h = self.upscore8(fuse_pool3)
        upscore8 = h  # 1/1

        # score
        h = upscore8[:, :, 31:31 + x.data.shape[2], 31:31 + x.data.shape[3]]
        self.score = h  # 1/1

        if t is None:
            assert not chainer.config.train
            return

        # testing with t or training
        self.loss = F.softmax_cross_entropy(self.score, t, normalize=False)
        chainer.report({'loss': self.loss})
        return self.loss
Example #40
File: fcn16s.py  Project: jonaswgit/fcn
    def __call__(self, x, t=None):
        # conv1
        h = F.relu(self.conv1_1(x))
        conv1_1 = h
        h = F.relu(self.conv1_2(conv1_1))
        conv1_2 = h
        h = F.max_pooling_2d(conv1_2, 2, stride=2, pad=0)
        pool1 = h  # 1/2

        # conv2
        h = F.relu(self.conv2_1(pool1))
        conv2_1 = h
        h = F.relu(self.conv2_2(conv2_1))
        conv2_2 = h
        h = F.max_pooling_2d(conv2_2, 2, stride=2, pad=0)
        pool2 = h  # 1/4

        # conv3
        h = F.relu(self.conv3_1(pool2))
        conv3_1 = h
        h = F.relu(self.conv3_2(conv3_1))
        conv3_2 = h
        h = F.relu(self.conv3_3(conv3_2))
        conv3_3 = h
        h = F.max_pooling_2d(conv3_3, 2, stride=2, pad=0)
        pool3 = h  # 1/8

        # conv4
        h = F.relu(self.conv4_1(pool3))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool4 = h  # 1/16

        # conv5
        h = F.relu(self.conv5_1(pool4))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = F.max_pooling_2d(h, 2, stride=2, pad=0)
        pool5 = h  # 1/32

        # fc6
        h = F.relu(self.fc6(pool5))
        h = F.dropout(h, ratio=.5)
        fc6 = h  # 1/32

        # fc7
        h = F.relu(self.fc7(fc6))
        h = F.dropout(h, ratio=.5)
        fc7 = h  # 1/32

        # score_fr
        h = self.score_fr(fc7)
        score_fr = h  # 1/32

        # upscore2
        h = self.upscore2(score_fr)
        upscore2 = h  # 1/16

        # score_pool4
        h = self.score_pool4(pool4)
        score_pool4 = h  # 1/16

        # score_pool4c
        h = score_pool4[:, :, 5:5 + upscore2.shape[2], 5:5 + upscore2.shape[3]]
        score_pool4c = h  # 1/16

        # fuse_pool4
        h = upscore2 + score_pool4c
        fuse_pool4 = h  # 1/16

        # upscore16
        h = self.upscore16(fuse_pool4)
        upscore16 = h  # 1/1

        # score
        h = upscore16[:, :, 27:27 + x.shape[2], 27:27 + x.shape[3]]
        score = h  # 1/1
        self.score = score

        if t is None:
            assert not chainer.configuration.config.train
            return

        loss = F.softmax_cross_entropy(self.score,
                                       t,
                                       normalize=False,
                                       class_weight=self.class_weight)
        if np.isnan(float(loss.data)):
            raise ValueError('Loss value is nan.')
        chainer.report({'loss': loss}, self)
        return loss
Example #41
 def forward(self, x):
     """Compute Q-values of actions for given observations."""
     h = F.relu(self.l0(x))
     h = F.relu(self.l1(h))
     return self.l2(h)
Example #42

 def __call__(self, x):
     h = self.base(x, layers=["pool5"])["pool5"]
     h = F.dropout(F.relu(self.fc_1(h)))
     h = self.fc_2(h)
     return h
Example #43
    def __call__(self, *x_list):
        # Run each frame through the conv stack, then feed the result to the LSTM.
        self.l8.reset_state()

        y_list = []
        for i in range(5):
            x = x_list[i]

            h = F.dropout(F.relu(self.conv1_1(x)))
            h = F.dropout(F.relu(self.conv1_2(h)))
            h = F.max_pooling_2d(h, 2, stride=2)

            h = F.dropout(F.relu(self.conv2_1(h)))
            h = F.dropout(F.relu(self.conv2_2(h)))
            h = F.max_pooling_2d(h, 2, stride=1)

            # (the conv3/conv5 blocks and fc6/fc7 are disabled in this variant)
            h = F.dropout(F.relu(self.conv4_1_b(h)))

            l = self.l8(h)

        h = self.fc9(l)

        if self.train:
            return h
        else:
            self.pred = F.softmax(h)  # softmax over the final fc9 output
            return self.pred
Example #44
    def __call__(self, x, train):
        h = F.relu(self.bn1(self.conv1(x), test=not train))
        h = F.relu(self.bn2(self.conv2(h), test=not train))
        h = self.bn3(self.conv3(h), test=not train)

        return F.relu(h + x)
Example #45
 def __call__(self, x):
     h = F.relu(self.mid(x))
     y = self.out(h)
     return y
Example #46
 def __call__(self, x):
     h1 = F.max_pooling_2d(
         F.relu(self.conv1(F.reshape(x, (-1, 1, 28, 28)))), 2)
     h2 = F.max_pooling_2d(F.relu(self.conv2(h1)), 2)
     h3 = F.relu(self.l1(h2))
     return self.l2(h3)
Example #47

 def __call__(self, x):
     for l in self.layers[:-1]:
         x = F.relu(l(x))
     return self.layers[-1](x)
Example #48

	def fwd(self, x):
		h1 = F.dropout(F.relu(self.l1(x)))
		h2 = F.dropout(F.relu(self.l2(h1)))
		h3 = self.l3(h2)
		return h3
Example #49
 def __call__(self, x):
     sentences = x
     h = F.relu(F.dropout(self.l_input(sentences), 0.5))
     h = F.relu(F.dropout(self.l_merge(h), 0.5))
     h = F.relu(F.dropout(self.l_merge2(h), 0.5))
     return h
Example #50
 def __call__(self, x):
     h = F.relu(self.fc1(x))
     h = F.relu(self.fc2(h))
     y = self.fc3(h)
     return y
Example #51
def forward(x_data, t_data, train=True):
    # Neural net architecture
    x = chainer.Variable(x_data)
    t = chainer.Variable(t_data)
    y = F.relu(model.l1(x))
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
Example #52
 def forward(self, x):
     y1 = F.relu(x)
     return y1
Example #53
    def __call__(self, img, sparse_inputs):
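        # Encoder with sparse auxiliary inputs: the sparse map is resized to each
        # scale and injected into every dense block, and skip branches
        # (skip_h2..skip_h4) feed the decoder on the way back up.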

        # Downsampling layers
        h = F.relu(self.conv1(np.concatenate((img, sparse_inputs), axis=1)))

        # Dense1
        x = np.resize(sparse_inputs, (1, 2, 240, 320))
        h = F.relu(self.d1_conv1(h))
        h = self.dense1(h, x)
        h = F.relu(self.d1_conv2(h))

        # Skip Dense 2
        _, _, H, W = x.shape
        x120 = np.resize(x, (1, 2, H // 2, W // 2))
        skip_h2 = F.relu(self.d2c_conv1(h))
        skip_h2 = self.dense2c(skip_h2, x120)
        skip_h2 = F.relu(self.d2c_conv2(skip_h2))

        h = F.relu(self.d2_conv1(h))
        h = self.dense2(h, x120)
        h = F.relu(self.d2_conv2(h))

        # Skip Dense 3
        _, _, H, W = x120.shape
        x60 = np.resize(x120, (1, 2, H // 2, W // 2))
        skip_h3 = F.relu(self.d3c_conv1(h))
        skip_h3 = self.dense3c(skip_h3, x60)
        skip_h3 = F.relu(self.d3c_conv2(skip_h3))

        h = F.relu(self.d3_conv1(h))
        h = self.dense3(h, x60)
        h = F.relu(self.d3_conv2(h))

        # Skip Dense 4
        _, _, H, W = x60.shape
        x30 = np.resize(x60, (1, 2, H // 2, W // 2))
        skip_h4 = F.relu(self.d4c_conv1(h))
        skip_h4 = self.dense4c(skip_h4, x30)
        skip_h4 = F.relu(self.d4c_conv2(skip_h4))

        h = F.relu(self.d4_conv1(h))
        h = self.dense4(h, x30)
        h = F.relu(self.d4_conv2(h))

        # Dense 5
        _, _, H, W = x30.shape
        x15 = np.resize(x30, (1, 2, H // 2, W // 2))
        b, c, H, W = x15.shape
        x16 = np.empty((b, c, H + 1, W), dtype=np.float32)
        x16[:, :, :-1, ...] = x15
        x16[:, :, -1, ...] = x15[:, :, -1]
        h = F.relu(self.d5_conv1(h))
        h = self.dense5(h, x16)
        h = F.relu(self.d5_conv2(h))

        # UpDense 1
        print(h.shape)
        h = F.relu(self.up1_conv1(h))
        print(h.shape)
        h = self.up_dense1(h, x16)
        h = F.relu(self.up1_conv2(h))

        h = F.concat((h, skip_h4), axis=1)

        # UpDense 2
        print(h.shape)
        h = F.relu(self.up2_conv1(h))
        print(h.shape)
        # h = self.up_dense2(h, new_x)
        # h = F.relu(self.up2_conv2(h))
        # print(h.shape)

        # # UpDense 3
        # h = F.relu(self.up3_conv1(h))
        # h = self.up_dense3(h, new_x)
        # h = F.relu(self.up3_conv2(h))

        # # UpDense 4
        # h = F.relu(self.up4_conv1(h))
        # h = self.up_dense4(h, new_x)
        # h = F.relu(self.up4_conv2(h))

        return h
Example #54
    def forward(self, x, train, action, init_l=None):
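        # Recurrent Attention Model (RAM)-style step: sample a glimpse location
        # from a Gaussian policy, encode a multi-scale retina crop, update the
        # core (LSTM or RNN) state, and optionally emit an action and a baseline.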
        if init_l is None:
            # Location Net @t-1
            m = F.tanh(self.fc_hl(self.h))

            if train:
                eps = (self.xp.random.normal(0, 1, size=m.data.shape)).astype(
                    np.float32)
                l = m.data + np.sqrt(self.var) * eps
                # do not backward reinforce loss via l

                # log(location policy)
                ln_pi = -0.5 * F.sum((l - m) * (l - m), axis=1) / self.var
                with chainer.using_config('enable_backprop', train):
                    l = chainer.Variable(l)
            else:
                l = m
                ln_pi = None
        else:
            l = init_l
            ln_pi = None

        # Retina Encoding
        x.volatile = 'on'  # do not backward
        if self.xp == np:
            loc = l.data
        else:
            loc = self.xp.asnumpy(l.data)
        rho = crop(x, center=loc, size=self.g_size)

        # multi-scale glimpse
        for k in range(1, self.n_scales):
            s = np.power(2, k)
            patch = crop(x, center=loc, size=self.g_size * s)
            patch = F.average_pooling_2d(patch, ksize=s)
            rho = F.concat((rho, patch), axis=1)
        if train: rho.volatile = 'off'  # backward up to link emb_x

        hg = F.relu(self.emb_x(rho))

        # Location Encoding
        hl = F.relu(self.emb_l(l))

        # Glimpse Net
        g = F.relu(self.fc_lg(hl) + self.fc_xg(hg))

        # Core Net
        if self.use_lstm:
            self.h = self.core_lstm(g)
        else:
            self.h = F.relu(self.core_hh(self.h) + self.core_gh(g))

        # Action Net
        if action:
            y = self.fc_ha(self.h)
        else:
            y = None

        # Baseline
        if train and action:
            b = F.sigmoid(self.fc_hb(self.h))
            b = F.reshape(b, (-1, ))
        else:
            b = None

        return l, ln_pi, y, b
Example #55
 def __call__(self, x, t):
     h1 = F.relu(self.l1(x))
     y = self.l3(F.relu(self.l2(h1)))
     # self.loss = self.listwise_cost(y_data, t_data)
     self.loss = self.jsd(t, y)
     return self.loss
Example #56
 def __call__(self, x):
     h = F.max_pooling_2d(F.relu(self.conv1(x)), 2, 2)
     h = F.max_pooling_2d(F.relu(self.conv2(h)), 2, 2)
     h = F.dropout(F.relu(self.conv3(h)), ratio=self.dropout)
     h = F.dropout(F.relu(self.fc4(h)), ratio=self.dropout)
     return h
Example #57
    def __call__(self, input_blob, test_mode=False):
        # explicit and very flexible DAG!
        #################################
        data = input_blob[0]
        labels = input_blob[1]

        if (len(input_blob) >= 3):
            weights_classes = input_blob[2]
        else:
            weights_classes = chainer.Variable(
                cuda.cupy.ones((self.classes, 1), dtype='float32'))

        # ---- CONTRACTION BLOCKS ---- #
        # B1
        cblob = self.conv1_1(data)
        cblob = F.relu(cblob)
        cblob = self.conv1_2(cblob)
        cblob = F.relu(cblob)
        cblob = F.max_pooling_2d(cblob, (2, 2), stride=(2, 2), pad=(0, 0))

        # B2
        cblob = self.conv2_1(cblob)
        cblob = F.relu(cblob)
        cblob = self.conv2_2(cblob)
        cblob = F.relu(cblob)
        cblob = F.max_pooling_2d(cblob, (2, 2), stride=(2, 2), pad=(0, 0))

        # B3
        cblob = self.conv3_1(cblob)
        cblob = F.relu(cblob)
        cblob = self.conv3_2(cblob)
        cblob = F.relu(cblob)
        cblob = self.conv3_3(cblob)
        cblob = F.relu(cblob)
        cblob_mp3 = F.max_pooling_2d(cblob, (2, 2), stride=(2, 2), pad=(0, 0))

        # B4
        cblob = self.conv4_1(cblob_mp3)
        cblob = F.relu(cblob)
        cblob = self.conv4_2(cblob)
        cblob = F.relu(cblob)
        cblob = self.conv4_3(cblob)
        cblob = F.relu(cblob)
        cblob_mp4 = F.max_pooling_2d(cblob, (2, 2), stride=(2, 2), pad=(0, 0))

        # B5
        cblob = self.conv5_1(cblob_mp4)
        cblob = F.relu(cblob)
        cblob = self.conv5_2(cblob)
        cblob = F.relu(cblob)
        cblob = self.conv5_3(cblob)
        cblob = F.relu(cblob)
        cblob = F.max_pooling_2d(cblob, (2, 2), stride=(2, 2), pad=(0, 0))

        # FCs
        #cblob = self.conv_aux(cblob)
        #cblob = self.conv_auxb(cblob)
        cblob = self.fc_6(cblob)
        cblob = F.relu(cblob)
        cblob = F.dropout(cblob, ratio=0.5, train=not test_mode)
        cblob = self.fc_7(cblob)
        cblob = F.relu(cblob)
        cblob = F.dropout(cblob, ratio=0.5, train=not test_mode)
        cblob = self.conv_aux2(cblob)

        # ---- EXPANSION BLOCKS ---- #
        cblob = self.score2(cblob)
        cblob_aux1 = self.score_pool4(cblob_mp4)
        cblob = F.sum_binary(cblob, cblob_aux1)
        scoreup = self.score4(cblob)

        cblob_aux2 = self.score_pool3(cblob_mp3)
        cblob = F.sum_binary(scoreup, cblob_aux2)
        cblob = self.upsample(cblob)

        # ---- SOFTMAX CLASSIFIER ---- #
        self.blob_class = self.classi(cblob)
        self.probs = F.softmax(self.blob_class)

        # ---- WEIGHTED CROSS-ENTROPY LOSS ---- #
        self.output_point = self.probs

        if (test_mode != 2):
            self.loss = F.weighted_cross_entropy(self.probs,
                                                 labels,
                                                 weights_classes,
                                                 normalize=True)
            return self.loss
        else:
            return 0
Example #58
 def predict(self, x):
     h1 = F.relu(self.l1(x))
     h2 = F.relu(self.l2(h1))
     h = F.relu(self.l3(h2))
     return h.data
Example #59
 def __call__(self, x, train=False):
     return F.relu(self.bn(self.conv(x), test=not train))
Example #60
    def __call__(self, x, y):
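        # Colorization-style network: encode the input through VGG-like conv
        # blocks and predict a distribution over 313 quantized ab color bins;
        # at training time the target distribution is built from the ground-truth
        # ab image (data_ab_ss -> nn_enc_layer).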
        if y is not None:
            data_ab_ss = self.data_ab_ss(y)
            gt_ab_313 = self.nn_enc_layer.forward(data_ab_ss.data)
            gt_ab_313_va = chainer.Variable(gt_ab_313)

        # non_gray_mask = self.non_gray_mask_layer.forward(data_ab_ss)
        # prior_boost = self.prior_boost_layer.forward(gt_ab_313)

        # prior_boost_nongray = prior_boost * non_gray_mask

        h = F.relu(self.conv1_1(x))
        h = self.conv1_2norm(F.relu(self.conv1_2(h)))

        h = F.relu(self.conv2_1(h))
        h = self.conv2_2norm(F.relu(self.conv2_2(h)))

        h = F.relu(self.conv3_1(h))
        h = F.relu(self.conv3_2(h))
        h = F.relu(self.conv3_3(h))
        h = self.conv3_3norm(h)

        h = F.relu(self.conv4_1(h))
        h = F.relu(self.conv4_2(h))
        h = F.relu(self.conv4_3(h))
        h = self.conv4_3norm(h)

        h = F.relu(self.conv5_1(h))
        h = F.relu(self.conv5_2(h))
        h = F.relu(self.conv5_3(h))
        h = self.conv5_3norm(h)

        h = F.relu(self.conv6_1(h))
        h = F.relu(self.conv6_2(h))
        h = F.relu(self.conv6_3(h))
        h = self.conv6_3norm(h)

        h = F.relu(self.conv7_1(h))
        h = F.relu(self.conv7_2(h))
        h = F.relu(self.conv7_3(h))
        h = self.conv7_3norm(h)

        h = F.relu(self.conv8_1(h))
        h = F.relu(self.conv8_2(h))
        h = F.relu(self.conv8_3(h))

        h = F.relu(self.conv313(h))

        if y is not None:
            loss = F.softmax_cross_entropy(h, gt_ab_313_va)
            chainer.report({"main/loss": loss})
            return loss
        else:
            # no ground truth given, so return the predicted distribution
            return h