Code Example #1
    def forward(self, inputs, drop_connect_rate=None):
        """
        :param inputs: input tensor
        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
        :return: output of block
        """
        # Expansion and Depthwise Convolution
        x = inputs
        if self.expand_ratio != 1:
            x = relu_fn(self._bn0(self._expand_conv(inputs)))
        x = relu_fn(self._bn1(self._depthwise_conv(x)))

        # Squeeze and Excitation
        if self.has_se:
            x_squeezed = F.adaptive_avg_pool2d(x, 1)
            x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed)))
            x = torch.sigmoid(x_squeezed) * x

        x = self._bn2(self._project_conv(x))
        # Skip connection and drop connect
        if self.id_skip and self.stride == 1 and self.input_filters == self.output_filters:
            if drop_connect_rate:
                x = drop_connect(x,
                                 p=drop_connect_rate,
                                 training=self.training)
            x = x + inputs  # skip connection
        return x
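The `relu_fn` and `drop_connect` helpers used throughout these examples come from the EfficientNet-PyTorch codebase. As a point of reference, here is a minimal sketch of how they are commonly defined there (the Swish activation and per-sample stochastic depth); treat this as an assumption about the surrounding code, not an excerpt from these projects:

import torch

def relu_fn(x):
    # Swish activation: x * sigmoid(x). Early versions of
    # EfficientNet-PyTorch exposed it under the name relu_fn.
    return x * torch.sigmoid(x)

def drop_connect(inputs, p, training):
    # Stochastic depth: at train time, drop the whole residual branch
    # for a random subset of samples and rescale the survivors.
    if not training:
        return inputs
    keep_prob = 1 - p
    batch_size = inputs.shape[0]
    random_tensor = keep_prob + torch.rand(
        [batch_size, 1, 1, 1], dtype=inputs.dtype, device=inputs.device)
    binary_mask = torch.floor(random_tensor)  # 1 with probability keep_prob
    return inputs / keep_prob * binary_mask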
Code Example #2
    def forward(self, inputs):
        """ Returns output of the final convolution layer """

        backbone_indices = getattr(self, 'backbone_indices', None)
        if not backbone_indices:
            raise ValueError('no backbone indices, something went wrong!')

        # Stem
        x = relu_fn(self._bn0(self._conv_stem(inputs)))

        features = []
        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)

            if idx in backbone_indices:
                features.insert(0, x)

        # Head
        x = relu_fn(self._bn1(self._conv_head(x)))
        features.insert(0, x)

        return features
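Because each tap uses `features.insert(0, x)`, the returned list is ordered deepest-first: `features[0]` is the head output and later entries are progressively shallower block outputs. A hypothetical decoder consuming the list in that order might look like the following (`fuse_features` and `decoder_blocks` are illustrative names, not part of the source):

import torch
import torch.nn.functional as F

def fuse_features(features, decoder_blocks):
    # features[0] is the deepest map; zip pairs each shallower skip
    # tensor with one decoder module.
    x = features[0]
    for skip, block in zip(features[1:], decoder_blocks):
        x = F.interpolate(x, size=skip.shape[-2:], mode='nearest')
        x = block(torch.cat([x, skip], dim=1))
    return x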
Code Example #3
    def forward(self, inputs):
        # Stem
        x = relu_fn(self._bn0(self._conv_stem(inputs)))

        outputs = []

        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            outputs.append(x)

        # Head
        x = relu_fn(self._bn1(self._conv_head(x)))

        result = []
        i = 0
        r = list(reversed([80, 40, 24, 16]))  # == [16, 24, 40, 80], the stage output channel counts
        for feat in outputs:
            if i < len(r) and feat.shape[1] == r[i]:
                result.append(feat)
                i += 1
        return (x, *reversed(result))
Code Example #4
File: models.py Project: AdejuwonF/intphys-renderer
    def forward(self, inputs):
        x = relu_fn(self.model._bn0(self.model._conv_stem(inputs)))
        global_features = []
        for idx, block in enumerate(self.model._blocks):
            drop_connect_rate = self.model._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self.model._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            if idx in self.idx_list:
                global_features.append(x)
        x = relu_fn(self.model._bn1(self.model._conv_head(x)))
        global_features.append(x)
        global_features.reverse()
        return global_features
Code Example #5
    def extract_features(self, inputs):
        """ Returns output of the final convolution layer """

        # Stem
        x = relu_fn(self._bn0(self._conv_stem(inputs)))

        # Blocks
        self.output_blocks = []
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            self.output_blocks.append(x)
        # Head
        x = relu_fn(self._bn1(self._conv_head(x)))

        return self.output_blocks
Code Example #6
File: model.py Project: nvvaulin/icevision2019
def get_feature_layer(self, inputs):
    """ Returns output of the final convolution layer """
    x = relu_fn(self._bn0(self._conv_stem(inputs)))
    
    for idx, block in enumerate(self._blocks):
        drop_connect_rate = self._global_params.drop_connect_rate
        if drop_connect_rate:
            drop_connect_rate *= float(idx) / len(self._blocks)
        x = block(x, drop_connect_rate=drop_connect_rate)
    return x
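Since this variant stops after the blocks, a downstream task has to attach its own head. A minimal sketch of that wiring (`model`, `images`, and the classifier are illustrative assumptions, not from the project):

import torch.nn.functional as F

def classify(model, images, classifier):
    # classifier: e.g. nn.Linear(C, num_classes), where C matches the
    # channel count of the returned feature map.
    feat = model.get_feature_layer(images)              # (N, C, H, W)
    pooled = F.adaptive_avg_pool2d(feat, 1).flatten(1)  # (N, C)
    return classifier(pooled)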
Code Example #7
    def extract_features(self, inputs):
        ops, total_ops = 0., 0.

        # conv_stem is not quantized
        x, delta_ops, delta_ops_total = self._conv_stem(inputs,
                                                        is_not_quantized=True)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

        # still no quantization whatsoever
        delta_ops, delta_ops_total = ops_bn(x, is_not_quantized=True)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = self._bn0(x)

        delta_ops, delta_ops_total = ops_non_linearity(x,
                                                       is_not_quantized=True)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = relu_fn(x)

        # quantization appears in these blocks:
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x, delta_ops, delta_ops_total = block(x, drop_connect_rate)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

        x, delta_ops, delta_ops_total = self._conv_head(x,
                                                        is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

        delta_ops, delta_ops_total = ops_bn(x, is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = self._bn1(x)

        delta_ops, delta_ops_total = ops_non_linearity(x,
                                                       is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = relu_fn(x)

        return x, ops, total_ops
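None of the examples define `ops_bn` or `ops_non_linearity`. A plausible minimal reading counts elementwise operations from the tensor shape and splits them into a quantized tally and a total tally; the cost model and the meaning of the two return values below are guesses, not the project's actual code:

def ops_non_linearity(x, is_not_quantized):
    # One op per activation element; count it toward the quantized
    # tally only when the tensor is quantized.
    total = float(x.numel())
    quantized = 0. if is_not_quantized else total
    return quantized, total

def ops_bn(x, is_not_quantized):
    # BatchNorm at inference folds to one scale and one shift per
    # element, i.e. roughly two elementwise ops.
    q, t = ops_non_linearity(x, is_not_quantized)
    return 2 * q, 2 * t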
Code Example #8
    def forward(self, x):
        result = []
        x = relu_fn(self._bn0(self._conv_stem(x)))
        result.append(x)

        skip_connection_idx = 0
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            if (skip_connection_idx < len(self._skip_connections)
                    and idx == self._skip_connections[skip_connection_idx] - 1):
                skip_connection_idx += 1
                result.append(x)

        return list(reversed(result))
Code Example #9
    def forward(self, inputs):
        """ return a list of tensor each half in size as previous one
            e.g. (5,16,128,128) -> (5,24,64,64)...
        """
        x_list = [inputs if self.includeX0 else None]
        x = relu_fn(self._bn0(self._conv_stem(inputs)))
        for idx, block in enumerate(self._blocks):
            if idx in self._layers:
                x_list.append(x)
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)

        x_list.append(x)
        return x_list
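If `self._layers` marks exactly the stride-2 blocks, the docstring's halving property can be checked directly. A quick hypothetical sanity check (`encoder` names an instance of this module):

import torch

feats = encoder(torch.randn(5, 3, 256, 256))
for prev, nxt in zip(feats, feats[1:]):
    # Each map should have half the spatial resolution of the previous one.
    assert prev.shape[-1] == 2 * nxt.shape[-1], (prev.shape, nxt.shape)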
Code Example #10
File: efnet.py Project: nvvaulin/icevision2019
    def forward(self, inputs):

        outputs = []

        # Stem
        x = relu_fn(self._bn0(self._conv_stem(inputs)))

        # Blocks
        for idx, block in enumerate(self._blocks):
            drop_connect_rate = self._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            if idx in self._features_idx:
                outputs.append(x)
        return outputs
Code Example #11
    def forward(self, inputs):

        index = [31, 21, 9, 5]  # block indices to tap (order is irrelevant for the membership test)
        out = []
        x = relu_fn(self.model._bn0(self.model._conv_stem(inputs)))

        # Blocks
        for idx, block in enumerate(self.model._blocks):
            drop_connect_rate = self.model._global_params.drop_connect_rate
            if drop_connect_rate:
                drop_connect_rate *= float(idx) / len(self.model._blocks)
            x = block(x, drop_connect_rate=drop_connect_rate)
            if idx in index:
                out.append(x)

        # For debugging, print the tapped feature shapes:
        # for index, t in enumerate(out):
        #     print(index, t.shape)
        # Head (unused in this variant):
        # x = relu_fn(self.model._bn1(self.model._conv_head(x)))

        return out
Code Example #12
    def forward(self, inputs, drop_connect_rate=None):
        """
        Forward run of the block, see comments to EfficientNet.forward for clarification.
        """

        ops, total_ops = 0., 0.

        x = inputs
        if self._block_args.expand_ratio != 1:
            x, delta_ops, delta_ops_total = self._expand_conv(
                inputs, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

            delta_ops, delta_ops_total = ops_bn(x, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
            x = self._bn0(x)

            delta_ops, delta_ops_total = ops_non_linearity(
                x, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
            x = relu_fn(x)

        x, delta_ops, delta_ops_total = self._depthwise_conv(
            x, is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

        delta_ops, delta_ops_total = ops_bn(x, is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = self._bn1(x)

        delta_ops, delta_ops_total = ops_non_linearity(x,
                                                       is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = relu_fn(x)

        if self.has_se:
            delta_ops, delta_ops_total = ops_adaptive_avg_pool(
                x, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
            x_squeezed = F.adaptive_avg_pool2d(x, 1)

            x_squeezed, delta_ops, delta_ops_total = self._se_reduce(
                x_squeezed, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

            delta_ops, delta_ops_total = ops_non_linearity(
                x_squeezed, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
            x_squeezed = relu_fn(x_squeezed)

            x_squeezed, delta_ops, delta_ops_total = self._se_expand(
                x_squeezed, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

            delta_ops, delta_ops_total = ops_non_linearity(
                x, is_not_quantized=False)
            ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
            x = torch.sigmoid(x_squeezed) * x

        x, delta_ops, delta_ops_total = self._project_conv(
            x, is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total

        delta_ops, delta_ops_total = ops_bn(x, is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        x = self._bn2(x)

        input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters
        if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters:
            if drop_connect_rate:
                x = drop_connect(x,
                                 p=drop_connect_rate,
                                 training=self.training)
            x = x + inputs  # skip connection

        delta_ops, delta_ops_total = ops_non_linearity(x,
                                                       is_not_quantized=False)
        ops, total_ops = ops + delta_ops, total_ops + delta_ops_total
        return x, ops, total_ops