Example #1
    def __init__(self,
                 probs=None,
                 seed=None,
                 dtype=mstype.int32,
                 name="Categorical"):
        param = dict(locals())
        param['param_dict'] = {'probs': probs}
        valid_dtype = mstype.uint_type + mstype.int_type + mstype.float_type
        Validator.check_type_name("dtype", dtype, valid_dtype,
                                  type(self).__name__)
        super(Categorical, self).__init__(seed, dtype, name, param)

        self._probs = self._add_parameter(probs, 'probs')
        if self.probs is not None:
            check_rank(self.probs)
            check_prob(self.probs)
            check_sum_equal_one(probs)

            # update is_scalar_batch and broadcast_shape
            # drop one dimension
            if self.probs.shape[:-1] == ():
                self._is_scalar_batch = True
            self._broadcast_shape = self._broadcast_shape[:-1]

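        # Cache the primitive ops and helpers used by the distribution's methods.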
        self.argmax = P.ArgMaxWithValue(axis=-1)
        self.broadcast = broadcast_to
        self.cast = P.Cast()
        self.clip_by_value = C.clip_by_value
        self.concat = P.Concat(-1)
        self.cumsum = P.CumSum()
        self.dtypeop = P.DType()
        self.exp = exp_generic
        self.expand_dim = P.ExpandDims()
        self.fill = P.Fill()
        self.gather = P.GatherNd()
        self.greater = P.Greater()
        self.issubclass = P.IsSubClass()
        self.less = P.Less()
        self.log = log_generic
        self.log_softmax = P.LogSoftmax()
        self.logicor = P.LogicalOr()
        self.logicand = P.LogicalAnd()
        self.multinomial = P.Multinomial(seed=self.seed)
        self.reshape = P.Reshape()
        self.reduce_sum = P.ReduceSum(keep_dims=True)
        self.select = P.Select()
        self.shape = P.Shape()
        self.softmax = P.Softmax()
        self.squeeze = P.Squeeze()
        self.squeeze_first_axis = P.Squeeze(0)
        self.squeeze_last_axis = P.Squeeze(-1)
        self.square = P.Square()
        self.transpose = P.Transpose()
        self.is_nan = P.IsNan()

        self.index_type = mstype.int32
        self.nan = np.nan
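
A minimal usage sketch for this probs-only constructor, assuming the class is exposed as mindspore.nn.probability.distribution.Categorical (module path inferred from the MindSpore identifiers above, not shown in the snippet):

import mindspore as ms
import mindspore.nn.probability.distribution as msd

# probs must be positive, rank >= 1, and sum to one along the last axis;
# the constructor above enforces this via check_prob/check_sum_equal_one.
ca = msd.Categorical(probs=[0.2, 0.3, 0.5], seed=5, dtype=ms.int32)

print(ca.sample((2,)))                   # two draws, e.g. [2 1]
print(ca.prob(ms.Tensor(2, ms.int32)))   # probability of category 2 -> 0.5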
Example #2
 def __init__(self,
              probs=None,
              logits=None,
              seed=None,
              dtype=mstype.int32,
              name="Categorical"):
     param = dict(locals())
     valid_dtype = mstype.int_type
     check_type(dtype, valid_dtype, "Categorical")
     super(Categorical, self).__init__(seed, dtype, name, param)
     if (probs is None) == (logits is None):
         raise_probs_logits_error()
     self.reduce_sum = P.ReduceSum(keep_dims=True)
     self.reduce_sum1 = P.ReduceSum(keep_dims=False)
     self.log = P.Log()
     self.exp = P.Exp()
     self.shape = P.Shape()
     self.reshape = P.Reshape()
     self.div = P.RealDiv()
     self.size = P.Size()
     self.multinomial = P.Multinomial(seed=self.seed)
     self.cast = P.Cast()
     self.expandim = P.ExpandDims()
     self.gather = P.GatherNd()
     self.concat = P.Concat(-1)
     self.transpose = P.Transpose()
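     # Normalize whichever parameterization was given and derive the other:
     # probs are rescaled to sum to one; logits are shifted by their log-sum-exp.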
     if probs is not None:
         self._probs = cast_to_tensor(probs, mstype.float32)
         input_sum = self.reduce_sum(self._probs, -1)
         self._probs = self.div(self._probs, input_sum)
         self._logits = probs_to_logits(self._probs)
         self._param = self._probs
     else:
         self._logits = cast_to_tensor(logits, mstype.float32)
         input_sum = self.reduce_sum(self.exp(self._logits), -1)
         self._logits = self._logits - self.log(input_sum)
         self._probs = logits_to_probs(self._logits)
         self._param = self._logits
     self._num_events = self.shape(self._param)[-1]
     self._param2d = self.reshape(self._param, (-1, self._num_events))
     self._batch_shape = self.shape(self._param)[:-1]
     self._batch_shape_n = (1, ) * len(self._batch_shape)
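
The two branches above implement the usual probs/logits duality: given probs, rescale to sum to one and take logs; given logits, subtract the log-sum-exp so that exp(logits) is a proper distribution. A minimal NumPy sketch of the same arithmetic (illustrative only, independent of MindSpore):

import numpy as np

# Logits branch: shift by log-sum-exp so exp(logits) sums to one.
logits = np.array([1.0, 2.0, 3.0])
norm_logits = logits - np.log(np.exp(logits).sum())
probs = np.exp(norm_logits)                 # logits_to_probs
assert np.isclose(probs.sum(), 1.0)

# Probs branch: rescale, then take logs.
raw = np.array([2.0, 3.0, 5.0])
probs2 = raw / raw.sum()                    # self.div(self._probs, input_sum)
logits2 = np.log(probs2)                    # probs_to_logits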
Example #3
 def __init__(self,
              probs=None,
              logits=None,
              seed=0,
              dtype=mstype.int32,
              name="Categorical"):
     param = dict(locals())
     super(Categorical, self).__init__(seed, dtype, name, param)
     if (probs is None) == (logits is None):
         raise ValueError(
             "Either 'prob' or 'logits' must be specified, but not both.")
     self.reduce_sum = P.ReduceSum(keep_dims=True)
     self.log = P.Log()
     self.exp = P.Exp()
     self.shape = P.Shape()
     self.reshape = P.Reshape()
     self.div = P.RealDiv()
     self.size = P.Size()
     self.multinomial = P.Multinomial(seed=seed)
     self.cast = P.Cast()
     self.expandim = P.ExpandDims()
     self.gather = P.GatherNd()
     self.concat = P.Concat(-1)
     if probs is not None:
         self._probs = cast_to_tensor(probs, mstype.float32)
         input_sum = self.reduce_sum(self._probs, -1)
         self._probs = self.div(self._probs, input_sum)
         self._logits = probs_to_logits(self._probs)
         self._param = self._probs
     else:
         self._logits = cast_to_tensor(logits, mstype.float32)
         input_sum = self.reduce_sum(self.exp(self._logits), -1)
         self._logits = self._logits - self.log(input_sum)
         self._probs = logits_to_probs(self._logits)
         self._param = self._logits
     self._num_events = self.shape(self._param)[-1]
     self._param2d = self.reshape(self._param, (-1, self._num_events))
     self._batch_shape = self.shape(self._param2d)[0]
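
This variant differs from Example #2 mainly in its defaults (seed=0, no dtype validation, no transpose op) and in storing the flattened batch count rather than the batch shape tuple. A sketch of the derived attributes for a batched logits input (hypothetical shapes, traced from the code above):

# For logits with shape (4, 2, 10):
#   _num_events    == 10       # self.shape(self._param)[-1]
#   _param2d.shape == (8, 10)  # self.reshape(self._param, (-1, 10))
#   _batch_shape   == 8        # self.shape(self._param2d)[0], a row count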
Example #4
    def __init__(self,
                 decoder: Model,
                 model_config=None,
                 generate_length: int = 1,
                 tokenizer: Optional[GPT2Tokenizer] = None,
                 topk_num: int = 0,
                 topp_prob: float = 1.0,
                 temperature: float = 1.0,
                 min_tokens_to_keep: int = 1,
                 early_stop: bool = False,
                 demo_mode: bool = False,
                 return_ids: bool = False,
                 return_last_token_logits: bool = False,
                 append_eos: bool = False):

        assert model_config is not None, 'model_config is required for sampling.'
        
        self.model_config = model_config
        self.topk_num = topk_num
        self.topp_prob = topp_prob
        self.temperature = temperature
        self.min_tokens_to_keep = min_tokens_to_keep
        
        self.decoder = decoder
        self.tokenizer = tokenizer
        self.reshape = P.Reshape()
        self.cumsum = P.CumSum()
        self.onehot = P.OneHot()
        self.generate_length = generate_length
        self.seq_length = model_config.seq_length
        self.batch_size = model_config.batch_size
        self.vocab_size = model_config.vocab_size
        
        self.on_value = Tensor(1.0, mstype.float32)
        self.off_value = Tensor(0.0, mstype.float32)
        self.cast = P.Cast()
        self.concat = P.Concat()
        self.early_stop = early_stop
        self.demo_mode = demo_mode
        self.return_ids = return_ids
        self.return_last_token_logits = return_last_token_logits
        self.append_eos = append_eos
        self.device_target = get_context("device_target")

        # Choose the sampling function according to the device target.
        if self.device_target == "GPU":
            self.sample_function = P.Multinomial(seed=1)
        elif self.device_target == "Ascend":
            self.sample_function = P.RandomCategorical(mstype.int32)
        else:
            raise NotImplementedError("Device Target {} not supported.".format(self.device_target))

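        # Truncate the distribution with top-k / top-p (nucleus) filtering before sampling.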
        self.filter_distribution = TopKTopP_Filter(self.batch_size,
                                                   self.vocab_size,
                                                   k=self.topk_num,
                                                   p=self.topp_prob,
                                                   temperature=self.temperature,
                                                   min_tokens_to_keep=self.min_tokens_to_keep)

        if self.tokenizer is not None:
            self.eos_id = self.tokenizer.eos_token_id
            self.eos_text = self.tokenizer.eos_token
        else:
            self.eos_id = model_config.vocab_size - 1
            self.eos_text = "<|endoftext|>"

        if self.demo_mode:
            assert self.batch_size == 1, 'Demo mode requires batch_size equal to 1, but got batch_size={}'.format(
                self.batch_size)
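
The TopKTopP_Filter class itself is not shown here. The following is a rough, self-contained NumPy sketch of what top-k / top-p (nucleus) filtering with temperature does to one row of logits; it is an illustration of the technique, not the project's actual implementation:

import numpy as np

def top_k_top_p_filter(logits, k=0, p=1.0, temperature=1.0, min_tokens_to_keep=1):
    """Illustrative top-k / nucleus filtering for a single row of float logits."""
    logits = logits / temperature
    if k > 0:
        # Keep only the k largest logits (never fewer than min_tokens_to_keep).
        kth = np.sort(logits)[-max(k, min_tokens_to_keep)]
        logits = np.where(logits < kth, -np.inf, logits)
    if p < 1.0:
        # Sort descending, softmax, and keep tokens until cumulative prob reaches p.
        order = np.argsort(logits)[::-1]
        probs = np.exp(logits[order] - logits[order].max())
        probs /= probs.sum()
        cum = np.cumsum(probs)
        cutoff = max(int((cum < p).sum()) + 1, min_tokens_to_keep)
        mask = np.full_like(logits, -np.inf)
        mask[order[:cutoff]] = logits[order[:cutoff]]
        logits = mask
    return logits

# Tokens outside the filter get -inf logits, i.e. zero sampling probability.
filtered = top_k_top_p_filter(np.array([2.0, 1.0, 0.5, -1.0]), k=3, p=0.9)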