예제 #1
0
 def __init__(self, num_segments, dyn_a=True, dyn_b=True):
     """Build the net: an UnsortedSegmentSum op plus a GPU dynamic-shape
     converter, with flags selecting which inputs get converted.

     Args:
         num_segments: segment count forwarded to UnsortedSegmentSum.
         dyn_a, dyn_b: whether the first / second input is converted to a
             dynamic shape (presumably consumed in construct() — not shown).
     """
     super(UnsortedSegmentSumDynNet, self).__init__()
     # Which inputs to run through the dynamic-shape conversion.
     self.to_dyn_1 = dyn_a
     self.to_dyn_2 = dyn_b
     self.num_segments = num_segments
     self.unsorted_segment_sum = P.UnsortedSegmentSum()
     self.gpu_convert_to_dynamic_shape = inner.GpuConvertToDynamicShape()
예제 #2
0
def rowtensor_deduplicate_indices_slices(grad):
    """Deduplicate a RowTensor's indices, summing values at duplicate indices."""
    # Unique returns the distinct indices plus, for every original index,
    # its position within the unique list.
    uniq, positions = P.Unique()(grad.indices)

    # One output row per unique index: the segment count is the dynamic
    # length (dim 0) of the unique index vector.
    segment_count = P.DynamicShape()(uniq)[0]
    merged_values = P.UnsortedSegmentSum()(grad.values, positions, segment_count)

    return RowTensor(uniq, merged_values, grad.dense_shape)
예제 #3
0
    def __init__(self,
                 vocab_size,
                 embedding_size,
                 field_size,
                 param_init='normal',
                 target='CPU',
                 slice_mode='batch_slice',
                 feature_num_list=None,
                 max_norm=None,
                 sparse=True,
                 operator='SUM'):
        """Initialize the multi-field embedding lookup.

        Args:
            vocab_size, embedding_size, param_init, target, slice_mode,
                feature_num_list, max_norm, sparse: forwarded unchanged to
                the parent class initializer.
            field_size: number of fields; validated to be a positive int.
            operator: how embeddings within a field are merged; one of
                'SUM', 'MAX', 'MEAN'.

        Raises:
            ValueError: if `operator` is not supported, or (under auto /
                semi-auto parallel) `slice_mode` is not one of
                'table_row_slice', 'batch_slice', 'table_column_slice'.
        """
        super(MultiFieldEmbeddingLookup,
              self).__init__(vocab_size, embedding_size, param_init, target,
                             slice_mode, feature_num_list, max_norm, sparse)
        self.field_size = validator.check_positive_int(field_size,
                                                       'field_size')
        self.operator = operator

        # Elementwise ops used when combining per-field embeddings.
        self.mul = P.Mul()
        self.inf_mask_mul = P.Mul()
        self.bias_add = P.Add()
        self.inf_add = P.Add()
        self.merge_op = None
        # Presumably counts entries per segment (e.g. the MEAN denominator)
        # — verify in construct(), which is not shown here.
        self.count_op = P.UnsortedSegmentSum()
        self.abs = P.Abs()
        self.equal = P.Equal()
        self.add = P.Add()
        self.cast = P.Cast()
        # DivNoNan yields 0 instead of NaN/Inf when a divisor is 0.
        self.div_no_nan = P.DivNoNan()
        self.expand = P.ExpandDims()
        self.max_mask_mul = P.Mul()
        self.max_no_equal = P.NotEqual()

        # Select the segment-merge primitive. Note MEAN also uses a segment
        # sum; presumably the division by counts happens later — not shown.
        if operator == MultiFieldEmbeddingLookup.OPERATOR_SUM:
            self.merge_op = P.UnsortedSegmentSum()
        elif operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
            self.merge_op = P.UnsortedSegmentMax()
        elif operator == MultiFieldEmbeddingLookup.OPERATOR_MEAN:
            self.merge_op = P.UnsortedSegmentSum()
        else:
            raise ValueError(
                "The operator supports ['SUM', 'MAX', 'MEAN'], but found: " +
                str(operator))

        # Shard strategies are only configured under (semi-)auto parallel.
        parallel_mode = _get_parallel_mode()
        is_auto_parallel = parallel_mode in (ParallelMode.SEMI_AUTO_PARALLEL,
                                             ParallelMode.AUTO_PARALLEL)
        if slice_mode in ["table_row_slice", "batch_slice"
                          ] and is_auto_parallel:
            # Row/batch slicing: the leading dimension of each input is
            # split across the whole device group.
            self.merge_op.shard(
                ((get_group_size(), 1, 1), (get_group_size(), 1)))
            self.expand.shard(((get_group_size(), ), ))
            self.bias_add.shard(((1, 1), (1, 1)))
            self.mul.shard(
                ((get_group_size(), 1, 1), (get_group_size(), 1, 1)))
            self.count_op.shard(((get_group_size(), 1), (get_group_size(), 1)))
            self.add.shard(((get_group_size(), ), (get_group_size(), )))
            self.div_no_nan.shard(
                ((get_group_size(), 1), (get_group_size(), 1)))
            self.max_mask_mul.shard(
                ((get_group_size(), 1), (get_group_size(), 1)))
            self.max_no_equal.shard(((1, ), ()))
            if operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
                # MAX mode re-shards these ops with different input ranks,
                # overriding the strategies assigned just above.
                self.equal.shard(((get_group_size(), 1, 1), ()))
                self.inf_mask_mul.shard(((get_group_size(), 1, 1), ()))
                self.merge_op.shard(
                    ((get_group_size(), 1), (get_group_size(), )))
                self.count_op.shard(
                    ((get_group_size(), ), (get_group_size(), )))
                self.inf_add.shard(
                    ((get_group_size(), 1, 1), (get_group_size(), 1, 1)))
        elif slice_mode == "table_column_slice" and is_auto_parallel:
            # Column slicing: split the trailing (embedding) dimension
            # across devices instead of the leading one.
            self.merge_op.shard(((1, 1, get_group_size()), (1, 1)))
            self.div_no_nan.shard(((1, get_group_size()), (1, 1)))
            self.bias_add.shard(((1, 1), (1, 1)))
            self.mul.shard(((1, 1, 1), (1, 1, get_group_size())))
            self.count_op.shard(((1, 1), (1, 1)))
            self.add.shard(((1, ), (1, )))
            self.max_mask_mul.shard(((1, get_group_size()), (1, 1)))
            self.expand.shard(((1, ), ))
            self.max_no_equal.shard(((1, ), ()))
            if operator == MultiFieldEmbeddingLookup.OPERATOR_MAX:
                self.equal.shard(((1, 1, 1), ()))
                self.inf_mask_mul.shard(((1, 1, 1), ()))
                self.merge_op.shard(((1, get_group_size()), (1, )))
                self.count_op.shard(((1, ), (1, )))
                self.inf_add.shard(((1, 1, get_group_size()), (1, 1, 1)))
        else:
            # Any other slice_mode is only an error when auto parallel is on.
            if is_auto_parallel:
                raise ValueError(
                    "slice_mode should be  ['table_row_slice', 'batch_slice' and \
                       'table_column_slice'], but get " + str(slice_mode))

        # Min value for fp32
        # NOTE(review): presumably used as the mask filler in MAX mode so
        # masked entries never win the segment max — confirm in construct().
        self.negative_inf_value = -3.402823466E+38
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
from mindspore.ops import Primitive
from mindspore.ops import operations as P

# Graph-construction helpers; each Primitive is identified purely by name.
make_tuple = Primitive('make_tuple')
tuple_getitem = Primitive('tuple_getitem')
# Concrete op instance (with full P.UnsortedSegmentSum semantics) vs. the
# name-only Primitive below — presumably one builds the input graph and the
# other matches nodes after a rewrite pass; confirm against the test bodies.
unsorted_segment_sum = P.UnsortedSegmentSum()
num_segments = 4
padding = Primitive('Padding')
op_slice = Primitive('Slice')
op_unsorted_segment_sum = Primitive('UnsortedSegmentSum')


class FnDict:
    """Tiny registry mapping a function's __name__ to the function itself.

    Call the instance with a function to register it; retrieve a registered
    function by name via indexing.
    """

    def __init__(self):
        # Underlying name -> function mapping.
        self.fnDict = {}

    def __call__(self, fn):
        # Register under the function's own name. Returns None, so this is
        # a registrar rather than a pass-through decorator.
        key = fn.__name__
        self.fnDict[key] = fn

    def __getitem__(self, name):
        # Raises KeyError for unregistered names, like a plain dict.
        return self.fnDict[name]
예제 #5
0
     'block': P.GatherV2(),
     'desc_const': [1],
     'desc_inputs': [[32, 5, 1024], Tensor(np.array([3]).astype(np.int32))],
     'desc_bprop': [[32, 1, 1024]]}),
 ('GatherV2_5', {
     'block': P.GatherV2(),
     'desc_const': [-1],
     'desc_inputs': [[3, 1, 3], Tensor(np.array([0, 1]).astype(np.int32))],
     'desc_bprop': [[3, 1, 2]]}),
 ('GatherV2_6', {
     'block': P.GatherV2(),
     'desc_const': [0],
     'desc_inputs': [[1152], Tensor(np.array(10).astype(np.int32))],
     'desc_bprop': [Tensor(np.array(10).astype(np.float32))]}),
 ('UnsortedSegmentSum', {
     'block': P.UnsortedSegmentSum(),
     'desc_const': [1280],
     'desc_inputs': [[1280,1024], Tensor(np.ones(1280).astype(np.int32))],
     'desc_bprop': [[8192,1024]],
     'skip': ['backward']}),
 ('UnsortedSegmentSum_1', {
     'block': P.UnsortedSegmentSum(),
     'desc_const': [4],
     'desc_inputs': [[3, 2, 1, 3], Tensor(np.array([[0, 1], [0, 1], [0, 1]]).astype(np.int32))],
     'desc_bprop': [[4, 1, 3]],
     'skip': ['backward']}),
 ('DropoutGenMask', {
     'block': P.DropoutGenMask(),
     'desc_const': [(2, 2), Tensor(0.5, mstype.float32)],
     'desc_inputs': [],
     'desc_bprop': [Tensor(np.ones(1).astype(np.int8))],
예제 #6
0
 def __init__(self, strategy1, strategy2, num_segments):
     """Net wrapping an UnsortedSegmentSum configured with the two given
     parallel shard strategies (one per operator input)."""
     super(Net, self).__init__()
     self.num_segments = num_segments
     self.merge_op = P.UnsortedSegmentSum().shard((strategy1, strategy2))
예제 #7
0
 def __init__(self, num_segments):
     """Net holding a single UnsortedSegmentSum op and its segment count."""
     super(UnsortedSegmentSumNet, self).__init__()
     self.num_segments = num_segments
     self.unsorted_segment_sum = P.UnsortedSegmentSum()
예제 #8
0
 def __init__(self):
     """Net combining Unique with UnsortedSegmentSum over fixed segment ids."""
     super(Net, self).__init__()
     # Fixed segment assignment for the eight input elements.
     self.segment_ids = Tensor([0, 0, 1, 2, 1, 1, 1, 1], mstype.int32)
     self.unq = P.Unique()
     self.sum = P.UnsortedSegmentSum()
예제 #9
0
 def __init__(self):
     # Single UnsortedSegmentSum primitive; presumably applied to the
     # network inputs in construct(), which is not shown here.
     super(Net, self).__init__()
     self.seg_sum = P.UnsortedSegmentSum()
예제 #10
0
    tile = ops.Tile()
    reduce_sum = ops.ReduceSum(keep_dims=False)
    square = ops.Square()
    argmin = ops.Argmin()

    centroid_matrix = reshape(tile(centroids, (num_pts, 1)),
                              (num_pts, k, num_feats))
    point_matrix = reshape(tile(data_points, (1, k)), (num_pts, k, num_feats))
    distances = reduce_sum(square(point_matrix - centroid_matrix), 2)
    centroid_group = argmin(distances)

    return centroid_group


# Update each of the three clusters' centroids by averaging its points.
unsorted_segment_sum = ops.UnsortedSegmentSum()
ones_like = ops.OnesLike()


def data_group_avg(group_ids, data):
    """Return the per-group mean of `data` over the 3 groups in `group_ids`."""
    # Per-group sums of the data points.
    group_sums = unsorted_segment_sum(data, group_ids, 3)
    # Per-group element counts, laid out like the data (sum of ones).
    group_counts = unsorted_segment_sum(ones_like(data), group_ids, 3)
    # Elementwise mean per group.
    return group_sums / group_counts


assign = ops.Assign()
# Training loop: iterate and update each group's centroid.
예제 #11
0
 def __init__(self, num_segments):
     """Dynamic-shape variant: pairs UnsortedSegmentSum with a GPU
     dynamic-shape conversion op."""
     super(UnsortedSegmentSumDynNet, self).__init__()
     self.num_segments = num_segments
     self.to_dyn_op = inner.GpuConvertToDynamicShape()
     self.unsorted_segment_sum = P.UnsortedSegmentSum()