Example #1
 def __init__(self,
              max_val=1.0,
              power_factors=(0.0448, 0.2856, 0.3001, 0.2363, 0.1333),
              filter_size=11,
              filter_sigma=1.5,
              k1=0.01,
              k2=0.03):
     super(MSSSIM, self).__init__()
     validator.check_value_type('max_val', max_val, [int, float],
                                self.cls_name)
     validator.check_number('max_val', max_val, 0.0, Rel.GT, self.cls_name)
     self.max_val = max_val
     validator.check_value_type('power_factors', power_factors,
                                [tuple, list], self.cls_name)
     self.filter_size = validator.check_int(filter_size, 1, Rel.GE,
                                            'filter_size', self.cls_name)
     self.filter_sigma = validator.check_positive_float(
         filter_sigma, 'filter_sigma', self.cls_name)
     self.k1 = validator.check_value_type('k1', k1, [float], self.cls_name)
     self.k2 = validator.check_value_type('k2', k2, [float], self.cls_name)
     window = _create_window(filter_size, filter_sigma)
     self.level = len(power_factors)
     self.conv = []
     for i in range(self.level):
         self.conv.append(_conv2d(1, 1, filter_size, Tensor(window)))
         self.conv[i].weight.requires_grad = False
     self.multi_convs_list = CellList(self.conv)
     self.weight_tensor = Tensor(power_factors, mstype.float32)
     self.avg_pool = AvgPool2d(kernel_size=2, stride=2, pad_mode='valid')
     self.relu = ReLU()
     self.reduce_mean = P.ReduceMean()
     self.prod = P.ReduceProd()
     self.pow = P.Pow()
     self.stack = P.Stack(axis=-1)
     self.concat = P.Concat(axis=1)
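
The snippet shows only the initializer. For orientation, here is a hedged sketch of how the prepared operators could be combined in a construct method; _msssim_per_level is a hypothetical helper and the reduction shown is illustrative, not the original implementation:

 def construct(self, img1, img2):
     # Illustrative sketch only: collect one contrast score per scale,
     # downsampling the inputs between scales.
     per_level = []
     for i in range(self.level):
         cs = _msssim_per_level(self.multi_convs_list[i], img1, img2)  # hypothetical helper
         per_level.append(self.relu(cs))
         img1, img2 = self.avg_pool(img1), self.avg_pool(img2)
     # P.Stack(axis=-1) merges the per-scale scores before weighting and reduction.
     stacked = self.stack(per_level)
     return self.prod(self.pow(stacked, self.weight_tensor), -1)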
Example #2
 def __init__(self,
              weight1,
              weight2,
              axis=0,
              strategy1=None,
              strategy2=None):
     super(Net1, self).__init__()
     self.pack = P.Stack(axis=axis).shard(strategy1)
     self.mul = P.Mul().shard(strategy2)
     self.weight1 = Parameter(weight1, "w1")
     self.weight2 = Parameter(weight2, "w2")
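
No construct is shown for Net1; a plausible sketch, assuming the test stacks the two weight parameters and multiplies the result with the input (the body below is an assumption, not the original):

 def construct(self, x):
     # assumed usage: stack the two parameters, then multiply elementwise with the input
     out = self.pack([self.weight1, self.weight2])
     out = self.mul(x, out)
     return out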
Example #3
 def __init__(self,
              weight1,
              weight2,
              axis=0,
              strategy1=None,
              strategy2=None,
              is_parameter=True):
     super(Net, self).__init__()
     self.pack = P.Stack(axis=axis).shard(strategy1)
     self.mul = P.Mul().shard(strategy2)
     if is_parameter:
         self.weight1 = Parameter(weight1, "w1")
     else:
         self.weight1 = weight1
     self.weight2 = Parameter(weight2, "w2")
Example #4
    def construct(self, x, attention_mask, layer_past=None):
        """
        self-attention

        Inputs:
            x: output of previous layer
            attention_mask: the attention mask matrix with shape (batch_size, 1, seq_length, seq_length)
            layer_past: the previous feature map

        Returns:
            output: Tensor, the output logit of this layer
            layer_present: Tensor, the feature map of current layer
        """

        original_shape = F.shape(x)
        x = F.reshape(x, (-1, original_shape[-1]))
        query = self.dense1(x)
        key = self.dense2(x)
        value = self.dense3(x)
        query = self.transpose(
            F.reshape(
                query,
                (-1, original_shape[1], self.n_head, self.size_per_head)),
            (0, 2, 1, 3))
        key = self.transpose(
            F.reshape(
                key, (-1, original_shape[1], self.n_head, self.size_per_head)),
            (0, 2, 3, 1))
        value = self.transpose(
            F.reshape(
                value,
                (-1, original_shape[1], self.n_head, self.size_per_head)),
            (0, 2, 1, 3))
        if self.use_past:
            past_value = layer_past[1]
            past_key = self.transpose(layer_past[0], (0, 1, 3, 2))
            key = self.concat_k((past_key, key))
            value = self.concat_v((past_value, value))
        layer_present = P.Stack()([self.transpose(key, (0, 1, 3, 2)), value])
        attention = self._attn(query, key, value, attention_mask)
        attention_merge = self.merge_heads(attention)
        output = self.projection(attention_merge)
        output = self.dropout(output)
        return output, layer_present
Example #5
 def __init__(self,
              dense_in_channel,
              dense_out_channel,
              axis=0,
              shape=None,
              strategy=None):
     super().__init__()
     weight_np = np.full((dense_out_channel, dense_in_channel),
                         0.01,
                         dtype=np.float32)
     bias_np = np.full((dense_out_channel), 0.01, dtype=np.float32)
     self.pack_con = Tensor(np.full(shape, 0.01, dtype=np.float32))
     self.flat = Flatten()
     self.dense = Dense(in_channels=dense_in_channel,
                        out_channels=dense_out_channel,
                        weight_init=Tensor(weight_np),
                        bias_init=Tensor(bias_np),
                        has_bias=True)
     self.mul = P.Mul()
     self.pack = P.Stack(axis)
     if strategy is not None:
         self.pack.shard(strategy)
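
The construct method is again omitted; a hedged sketch of how these pieces might be wired, assuming the constant tensor pack_con is stacked with the dense output (illustrative only):

 def construct(self, inputs):
     # assumed flow: flatten -> dense -> elementwise square -> stack with the constant tensor
     x = self.flat(inputs)
     x = self.dense(x)
     x = self.mul(x, x)
     return self.pack([self.pack_con, x])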
Example #6

from mindspore.ops import operations as P
from mindspore.ops import Primitive

stack = P.Stack()
concat = P.Concat()
make_tuple = Primitive('MakeTuple')


class FnDict:
    def __init__(self):
        self.fnDict = {}

    def __call__(self, fn):
        self.fnDict[fn.__name__] = fn

    def __getitem__(self, name):
        return self.fnDict[name]
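
A hedged sketch of how this registry is typically used: the instance is applied as a decorator to record graph-builder functions by name, then indexed to fetch one back (the function name below is illustrative). Since __call__ does not return the function, callers retrieve it through the registry rather than the decorated name:

fns = FnDict()

@fns
def stack_two(x, y):
    # builds a small graph that stacks two inputs along the default axis 0
    return stack((x, y))

graph_builder = fns['stack_two']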

Example #7
from typing import List

from mindspore import Tensor
from mindspore.ops import operations as op

def stack(inputs: List[Tensor], axis: int) -> Tensor:
    """Stack a list of tensors along the specified axis."""
    stack_op = op.Stack(axis)
    outputs = stack_op(inputs)
    return outputs
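
A short usage sketch for the helper above, assuming two same-shaped float32 tensors (numpy is used only to build the inputs):

import numpy as np

x = Tensor(np.ones((2, 3), np.float32))
y = Tensor(np.zeros((2, 3), np.float32))
out = stack([x, y], axis=0)  # resulting shape: (2, 2, 3)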
Example #8
 def __init__(self):
     super(PackNet, self).__init__()
     self.stack = P.Stack()
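
Only the initializer is shown; a hedged sketch of a matching construct, assuming the net simply stacks the tensors it receives (an assumption, not the original code):

 def construct(self, x1, x2):
     # assumed: stack the two inputs along the default axis 0
     return self.stack((x1, x2))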
Example #9
 def __init__(self, x, axis):
     super(Net, self).__init__()
     self.stack = P.Stack(axis)
     self.x = x
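
The construct method is omitted here as well; one plausible reading, assuming the stored self.x is a tuple or list of tensors stacked along the configured axis (an assumption, not the original code):

 def construct(self):
     # assumed: self.x holds the tensors to stack along the configured axis
     return self.stack(self.x)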