Code example #1
    def forward(self,
                query: Tensor,
                key: Tensor,
                value: Tensor,
                mask: Tensor = None) -> Tensor:
        """
        @param query shape -> [batch_size, max_length, emb_size]
        @param key shape -> [batch_size, max_length, emb_size]
        @param value shape -> [batch_size, max_length, emb_size]
        @param mask shape -> [1, max_length, max_length]
        @return a tensor with shape -> [batch_size, max_length, emb_size]
        """
        if mask is not None:
            # 1, n, n -> 1, 1, n, n; n is max length of sentence
            mask = mask.unsqueeze(1)
        batch_size = query.size(0)

        # do projection
        query, key, value = [
            linear_f(x).view(batch_size, -1, self.head_count,
                             self.model_k_dim).transpose(1, 2)
            for linear_f, x in zip(self.linears, (query, key, value))
        ]
        # do attention
        x, self.attn = attention(query, key, value, mask, self.dropout)
        # do concatenation
        x = x.transpose(1, 2).contiguous().view(
            batch_size, -1, self.head_count * self.model_k_dim)
        return self.linears[-1](x)
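The head split/merge bookkeeping above is easy to get wrong, so here is a standalone shape walk-through of the same reshaping (the sizes are illustrative; the linear projections and the attention itself are omitted):

    import torch

    batch_size, max_length, emb_size, head_count = 2, 5, 8, 2
    model_k_dim = emb_size // head_count

    q = torch.randn(batch_size, max_length, emb_size)
    # split: [batch, length, emb] -> [batch, heads, length, k_dim]
    q_heads = q.view(batch_size, -1, head_count, model_k_dim).transpose(1, 2)
    assert q_heads.shape == (batch_size, head_count, max_length, model_k_dim)
    # merge after attention: [batch, heads, length, k_dim] -> [batch, length, emb]
    merged = q_heads.transpose(1, 2).contiguous().view(
        batch_size, -1, head_count * model_k_dim)
    assert merged.shape == (batch_size, max_length, emb_size)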
Code example #2
    def __init__(self, input_size: int, hidden_size: int, bias: bool = True, p: Tuple[float, float] = (0.5, 0.5),
                 initializer: Callable[[Tensor], None] = None) -> None:
        super(VarLSTMCell, self).__init__()
        self.input_size: int = input_size
        self.hidden_size: int = hidden_size
        self.bias: bool = bias
        self.weight_ih: Tensor = Parameter(Tensor(4, input_size, hidden_size))
        self.weight_hh: Tensor = Parameter(Tensor(4, hidden_size, hidden_size))
        if bias:
            self.bias_ih: Tensor = Parameter(Tensor(4, hidden_size))
            self.bias_hh: Tensor = Parameter(Tensor(4, hidden_size))
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

        self.initializer: Callable[[Tensor], None] \
            = default_initializer(self.hidden_size) if initializer is None else initializer
        self.reset_parameters()
        p_in, p_hidden = p
        if p_in < 0. or p_in > 1.:
            raise ValueError("input dropout probability has to be between 0 and 1, "
                             "but got {}".format(p_in))
        if p_hidden < 0. or p_hidden > 1.:
            raise ValueError("hidden state dropout probability has to be between 0 and 1, "
                             "but got {}".format(p_hidden))
        self.p_in: float = p_in
        self.p_hidden: float = p_hidden
        self.noise_in: Tensor = None
        self.noise_hidden: Tensor = None
Code example #3
 def __init__(self, n_in:int, n_out:int, objective:str, y_range:Optional[Union[Tuple,np.ndarray]]=None, bias_init:Optional[float]=None,
              y_mean:Optional[Union[float,List[float],np.ndarray]]=None, y_std:Optional[Union[float,List[float],np.ndarray]]=None,
              lookup_init:Callable[[str,Optional[int],Optional[int]],Callable[[Tensor],None]]=lookup_normal_init, freeze:bool=False):
     super().__init__(n_in=n_in, n_out=n_out, objective=objective, bias_init=bias_init, lookup_init=lookup_init, freeze=freeze)
     self.y_range,self.y_mean,self.y_std = y_range,y_mean,y_std
     self.rescale = False
     if self.y_range is not None and (self.y_mean is not None or self.y_std is not None):
         raise ValueError("Both y_range (sigmoid output and rescaling) and y_mean + y_std (linear output and rescaling) are set. Please only set either.")
     if (self.y_mean is None and self.y_std is not None) or (self.y_mean is not None and self.y_std is None):
         raise ValueError("Only one of y_mean or y_std is set, but not both. Please set both or neither.")
     if self.y_mean is not None and self.y_std is not None and bias_init is not None:
         print("y_mean and y_std are both set, but so is bias_init. Bias init will be set to zero to provide accurate rescaling")
         self.bias_init = None
     if self.y_range is not None:
         if not isinstance(self.y_range, np.ndarray): self.y_range = np.array(self.y_range)
         self.y_min = np.array(np.min(self.y_range, axis=-1), dtype='float32')
         self.y_diff = np.abs(self.y_range.take([1], axis=-1)-self.y_range.take([0], axis=-1)).ravel()
         self.y_min, self.y_diff = to_device(Tensor(self.y_min)), to_device(Tensor(self.y_diff))
     elif self.y_mean is not None and self.y_std is not None:
          if not hasattr(self.y_mean, '__len__'): self.y_mean = [self.y_mean]
          if not hasattr(self.y_std, '__len__'): self.y_std = [self.y_std]
         self.y_mean,self.y_std = to_device(Tensor(self.y_mean)),to_device(Tensor(self.y_std))
         self.rescale = True
     self._build_layers()
     if self.freeze: self.freeze_layers()
Code example #4
    def forward(self, x: Tensor) -> Tensor:
        x = self.melspectrogram(x)
        x = x.unsqueeze(1)
        x = self.norm_input(x)

        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.maxpool(x)
        x = self.drop1(x)

        x = F.relu(self.conv3(x))
        x = F.relu(self.conv4(x))
        x = self.maxpool(x)
        x = self.drop2(x)

        x = F.relu(self.conv5(x))
        x = self.maxpool(x)
        x = self.drop2(x)

        x = F.relu(self.conv6(x))
        x = self.maxpool(x)
        x = self.drop2(x)

        x = F.relu(self.conv7(x))
        x = self.maxpool(x)
        x = self.drop2(x)

        x = self.fc(x.flatten(start_dim=1))
        x = self.fc_norm(x)
        return self.linear(x)
Code example #5
 def __init__(self,
              n_in: int,
              n_out: int,
              objective: str,
              y_range: Optional[Union[Tuple, np.ndarray]] = None,
              bias_init: Optional[float] = None,
              lookup_init: Callable[[str, Optional[int], Optional[int]],
                                    Callable[[Tensor],
                                             None]] = lookup_normal_init,
              freeze: bool = False):
     super().__init__(n_in=n_in,
                      n_out=n_out,
                      objective=objective,
                      bias_init=bias_init,
                      lookup_init=lookup_init,
                      freeze=freeze)
     self.y_range = y_range
     if self.y_range is not None:
         if not isinstance(self.y_range, np.ndarray):
             self.y_range = np.array(self.y_range)
         self.y_min = np.array(np.min(self.y_range, axis=-1),
                               dtype='float32')
         self.y_diff = np.abs(
             self.y_range.take([1], axis=-1) -
             self.y_range.take([0], axis=-1)).ravel()
         self.y_min, self.y_diff = to_device(Tensor(self.y_min)), to_device(
             Tensor(self.y_diff))
     self._build_layers()
     if self.freeze: self.freeze_layers()
Code example #6
File: metrics.py Project: GoWeiXH/DirectPyTorch
def multi_class_correct(y_pre: Tensor,
                        y_true: Tensor,
                        threshold=0.5,
                        device='cpu') -> Tensor:
    y_pre, y_true = y_pre.argmax(dim=1), y_true.argmax(dim=1)
    same = torch.as_tensor(y_pre == y_true, dtype=torch.int).to(device)
    return torch.sum(same)
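A quick check of this metric on toy one-hot data (note that the `threshold` argument is unused by this implementation):

    import torch

    y_pre = torch.tensor([[0.9, 0.1, 0.0],
                          [0.2, 0.7, 0.1],
                          [0.1, 0.1, 0.8]])
    y_true = torch.tensor([[1., 0., 0.],
                           [0., 0., 1.],
                           [0., 0., 1.]])
    # rows 0 and 2 agree on the argmax class, so the count is 2
    print(multi_class_correct(y_pre, y_true))  # tensor(2)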
Code example #7
    def tolist(paired_wavs: List[Tensor], paired_feature: Tensor):
        assert paired_feature.dim() == 3
        # (batch_size, max_seq_len, feat_dim)

        ratio = max([len(wav) for wav in paired_wavs]) / paired_feature.size(1)
        feature_len = [round(len(wav) / ratio) for wav in paired_wavs]
        feature = [f[:l] for f, l in zip(paired_feature, feature_len)]
        return feature
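For example, with waveforms of 1600 and 800 samples and features padded to 10 frames, the ratio is 160 samples per frame, so the second feature is trimmed to 5 frames (a sketch assuming `tolist` is callable as a plain function):

    import torch

    paired_wavs = [torch.zeros(1600), torch.zeros(800)]
    paired_feature = torch.zeros(2, 10, 4)  # (batch_size, max_seq_len, feat_dim)
    feature = tolist(paired_wavs, paired_feature)
    print([len(f) for f in feature])  # [10, 5]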
Code example #8
def runModel(coreTensor, inputSize: int, hiddenSize: int, d, model):
    '''
    Runs a one-way LSTM over the core tensor, writing per-news outputs to
    processVec/*.pkl files.
    :param coreTensor: nested sequences of words; note that it is immediately
        re-loaded from coreTensor.pkl below
    :return: retList (currently always empty; results live in the pickle files)
    '''
    # d = D('cuda')
    # logging.info('Core Tensor dumping.......')
    # pickle.dump(coreTensor, open('coreTensor.pkl', 'wb'))
    # logging.info('Core Tensor dumped.')
    logging.info('Core Tensor loading.......')
    coreTensor = pickle.load(open('coreTensor.pkl', 'rb'))
    logging.info('Core Tensor loaded.')
    coreLSTM = nn.LSTM(input_size=inputSize, hidden_size=hiddenSize).to(d)
    batch = 0
    logging.info('LSTM Start.(Core-One way)')
    retList = []
    sum_time = 0
    for news_tensor in coreTensor:
        time_start = time.time()
        inputs_2 = []
        for word in news_tensor:
            inputs_2.append(model[word])
        inputs = Tensor(inputs_2).to(d)
        hidden = (torch.rand(1, 1, hiddenSize).to(d), torch.rand(1, 1, hiddenSize).to(d))
        iter_time = 0
        out = Tensor().to(d)
        out_list = []
        for i in inputs:
            time_per_iter = time.time()
            out, hidden = coreLSTM(i.view(1, 1, -1), hidden)
            out_list = [out]

            # logging.info('\t\t + Iter: %d/%d Time: %.3lf s, remain %d iterations, ETA: %.3lf s' % (
            # iter_time + 1, len(inputs), time.time() - time_per_iter, len(inputs) - iter_time - 1,
            # (len(inputs) - iter_time - 1) * (time.time() - time_per_iter)))

            iter_time = iter_time + 1
        sum_time += int(time.time() - time_start)
        avg_time = 1.0 * sum_time / (batch + 1)
        remainSecTot = 1.0 * (len(coreTensor) - batch - 1) * (avg_time)
        remainMin = (int(remainSecTot) % 3600) // 60
        remainHour = int(remainSecTot) // 3600
        remainSec = int(remainSecTot) % 60
        logging.info('* News: %d/%d, Time: %.2lfs, remain %d news, ETA: %d:%02d:%02d' % (
            batch + 1, len(coreTensor), time.time() - time_start,
            len(coreTensor) - batch - 1, remainHour, remainMin, remainSec))
        fileName = 'processVec/Direct-LSTM-Oneway_Sentence_' + str(batch) + '.pkl'
        foutput = open(fileName, 'wb')
        pickle.dump(out_list, foutput)
        # retList.append(out)
        batch = batch + 1
        del out
        del out_list
        del inputs_2
        del hidden
    logging.info('LSTM Finish.(Core-One way)')
    return retList
Code example #9
    def evaluate(self,
                 inputs: Union[Tensor, np.ndarray, Tuple[Tensor, Tensor],
                               Tuple[np.ndarray, np.ndarray]],
                 targets: Union[Tensor, np.ndarray],
                 weights: Optional[Union[Tensor, np.ndarray]] = None,
                 callbacks: Optional[List[AbsCallback]] = None,
                 mask_inputs: bool = True) -> float:
        r'''
        Compute loss on provided data.

        Arguments:
            inputs: input data
            targets: targets
            weights: Optional weights
            callbacks: list of any callbacks to use during evaluation
            mask_inputs: whether to apply input mask if one has been set

        Returns:
            (weighted) loss of model predictions on provided data
        '''

        if callbacks is None: callbacks = []
        self.model.eval()
        if not isinstance(inputs, Tensor):
            if isinstance(inputs, tuple):
                if not isinstance(inputs[0], Tensor):
                    inputs = (to_device(Tensor(inputs[0]).float()),
                              to_device(Tensor(inputs[1]).float()))
            else:
                inputs = to_device(Tensor(inputs).float())
        for c in callbacks:
            c.on_eval_begin(inputs=inputs, targets=targets, weights=weights)
        if self.input_mask is not None and mask_inputs:
            if isinstance(inputs, tuple):
                # tuples are immutable, so rebuild with the masked first element
                inputs = (inputs[0][:, self.input_mask], inputs[1])
            else:
                inputs = inputs[:, self.input_mask]
        y_pred = self.model(inputs)

        if not isinstance(targets, Tensor):
            targets = to_device(Tensor(targets))
        if weights is not None and not isinstance(weights, Tensor):
            weights = to_device(Tensor(weights))

        if 'multiclass' in self.objective and not isinstance(
                targets, torch.LongTensor):
            targets = targets.long().squeeze()
        elif 'multiclass' not in self.objective and not isinstance(
                targets, torch.FloatTensor):
            targets = targets.float()

        loss = self.loss(weight=weights)(
            y_pred, targets) if weights is not None else self.loss()(y_pred,
                                                                     targets)
        for c in callbacks:
            c.on_eval_end(loss=loss)
        return loss.data.item()
Code example #10
    def test_on_batch(self, inputs, outputs, verbose=0):
        """

        :param inputs:
        :param outputs:
        :param verbose:
        :return:
        """
        grapheme_root = Tensor(outputs[0])
        vowel_diacritic = Tensor(outputs[1])
        consonant_diacritic = Tensor(outputs[2])

        _, grapheme_root_tru = grapheme_root.max(1)
        _, vowel_diacritic_tru = vowel_diacritic.max(1)
        _, consonant_diacritic_tru = consonant_diacritic.max(1)

        grapheme_root_hat, vowel_diacritic_hat, consonant_diacritic_hat = self.predict(inputs)
        grapheme_root_hat = grapheme_root_hat.cpu()
        vowel_diacritic_hat = vowel_diacritic_hat.cpu()
        consonant_diacritic_hat = consonant_diacritic_hat.cpu()

        if verbose == 1:
            print(grapheme_root_tru[:10])
            print(grapheme_root_hat[:10])
            print(vowel_diacritic_tru[:10])
            print(vowel_diacritic_hat[:10])
            print(consonant_diacritic_tru[:10])
            print(consonant_diacritic_hat[:10])

        grapheme_root = accuracy_score(grapheme_root_tru, grapheme_root_hat)
        vowel_diacritic = accuracy_score(vowel_diacritic_tru, vowel_diacritic_hat)
        consonant_diacritic = accuracy_score(consonant_diacritic_tru, consonant_diacritic_hat)

        print("accuracy: grapheme root {}, vowel diacritic {}, consonant diacritic {}".format(grapheme_root, vowel_diacritic, consonant_diacritic))
Code example #11
def logsumexp(x: Tensor, dim: int) -> Tensor:
    """
    Args:
        x: A pytorch tensor (any dimension will do)
        dim: int, over which to perform the summation.

    Returns: The result of the log(sum(exp(...))) operation.
    """
    # subtract the per-dim max before exponentiating, for numerical stability
    xmax, _ = x.max(dim, keepdim=True)  # kept for broadcasting against x
    xmax_, _ = x.max(dim)               # squeezed, for the final addition
    return xmax_ + torch.log(torch.exp(x - xmax).sum(dim))
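The max-shift is the standard numerical-stability trick: a naive log(sum(exp(x))) overflows for large entries, while the shifted form stays finite. A quick check:

    import torch

    x = torch.tensor([[1000.0, 1000.0]])
    print(torch.log(torch.exp(x).sum(dim=1)))  # tensor([inf]): exp overflows
    print(logsumexp(x, dim=1))                 # tensor([1000.6931]), i.e. 1000 + log(2)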
Code example #12
    def nests_loss(self, energy: Tensor, target: Tensor) -> Tensor:
        """
        Args:
            energy: Tensor
                the energy tensor with shape = [length, num_label, num_label]
            target: Tensor
                the tensor of target labels with shape [length]

        Returns: Tensor
                A 0D tensor for minus log likelihood loss
        """
        length, _, _ = energy.size()

        num_label_3 = self.indices_is.size(0)

        indices_3 = energy.new_empty((length, num_label_3)).long()
        indices_3[0, :] = self.indices_bs
        if length > 2:
            indices_3[1:length - 1, :] = self.indices_is.repeat(
                (length - 2, 1))
        indices_3[length - 1, :] = self.indices_es

        # shape = [num_label]
        partition_1 = None
        partition_3 = None

        # shape = []
        prev_label = self.index_bos
        tgt_energy = 0

        for t in range(length):
            # shape = [num_label, num_label]
            curr_energy = energy[t]
            if t == 0:
                partition_1 = curr_energy[self.index_bos, :]
                partition_3 = energy.new_full((num_label_3, ), -1e4)
            else:
                # shape = [num_label]
                partition = partition_1.clone()
                partition[indices_3[t - 1]] = partition_3
                partition_1 = logsumexp(curr_energy + partition_1.unsqueeze(1),
                                        dim=0)
                partition_3 = logsumexp(curr_energy[:, indices_3[t]] +
                                        partition.unsqueeze(1),
                                        dim=0)
            label = target[t]
            tgt_energy += curr_energy[prev_label, label]
            prev_label = label

        t = length - 1
        curr_energy = self.trans_matrix.data[:, self.index_eos]
        partition = curr_energy + partition_1
        partition[indices_3[t]] = curr_energy[indices_3[t]] + partition_3
        return logsumexp(partition, dim=0) - tgt_energy
Code example #13
File: functional.py Project: ropas/pytea
def arange(start, end=None, step=1, out=None, **kwargs):
    if end is None:  # arange(N)
        end = start
        start = 0
    if isinstance(start, int) and isinstance(end, int) and isinstance(step, int):
        dtype = torch.intDefault
    else:
        dtype = torch.floatDefault
    tensor = Tensor(int((end - start) / step))
    tensor.dtype = dtype
    LibCall.torch.copyOut(tensor, out)
    return tensor
Code example #14
 def get_inputs(
         self,
         on_device: bool = False) -> Union[Tensor, Tuple[Tensor, Tensor]]:
     if on_device:
         if self.matrix_inputs is None:
             return to_device(Tensor(self.inputs))
         else:
             return (to_device(Tensor(self.inputs)),
                     to_device(Tensor(self.matrix_inputs)))
     else:
         if self.matrix_inputs is None: return self.inputs
         else: return (self.inputs, self.matrix_inputs)
Code example #15
    def calc_loss(self, q_values: Tensor, target_q_values: Tensor,
                  actions: Tensor, rewards: Tensor,
                  done_mask: Tensor) -> Tensor:
        """
        Calculate the MSE loss of this step.
        The loss for an example is defined as:
            Q_samp(s) = r if done
                        = r + gamma * max_a' Q_target(s', a') otherwise
            loss = (Q_samp(s) - Q(s, a))^2

        Args:
            q_values: (torch tensor) shape = (batch_size, num_actions)
                The Q-values that your current network estimates (i.e. Q(s, a') for all a')
            target_q_values: (torch tensor) shape = (batch_size, num_actions)
                The Target Q-values that your target network estimates (i.e. Q_target(s', a') for all a')
            actions: (torch tensor) shape = (batch_size,)
                The actions that you actually took at each step (i.e. a)
            rewards: (torch tensor) shape = (batch_size,)
                The rewards that you actually got at each step (i.e. r)
            done_mask: (torch tensor) shape = (batch_size,)
                A boolean mask of examples where we reached the terminal state

        Hint:
            You may find the following functions useful
                - torch.max
                - torch.sum
                - torch.nn.functional.one_hot
                - torch.nn.functional.mse_loss
            You can convert `done_mask` to 0/1 values (0 = not done, 1 = done)
            using `.type`, as done below.

            To extract Q(a) for a specific "a" you can use torch.sum and
            torch.nn.functional.one_hot. Think about how.
        """
        # you may need this variable
        num_actions = self.env.action_space.n
        gamma = self.config.gamma
        done_mask = done_mask.type(torch.int)
        actions = actions.type(torch.int64)
        ##############################################################
        ##################### YOUR CODE HERE - 3-5 lines #############
        target_q = torch.reshape(
            torch.max(target_q_values, dim=1, keepdim=True).values, (-1, ))
        q_val1 = rewards + (1 - done_mask) * gamma * target_q
        q_val2 = torch.sum(
            q_values *
            torch.nn.functional.one_hot(actions, self.env.action_space.n),
            dim=1)
        loss = torch.nn.functional.mse_loss(q_val1, q_val2)
        ##############################################################
        ######################## END YOUR CODE #######################
        return loss
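The one-hot extraction used for q_val2 can be sanity-checked on toy tensors: multiplying the Q-matrix by a one-hot action mask and summing over the action axis picks out Q(s, a) for each example:

    import torch

    q = torch.tensor([[1.0, 2.0, 3.0],
                      [4.0, 5.0, 6.0]])
    actions = torch.tensor([2, 0])
    picked = torch.sum(q * torch.nn.functional.one_hot(actions, 3), dim=1)
    print(picked)  # tensor([3., 4.])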
Code example #16
File: cgan.py Project: VictorZuanazzi/SumOfAGan
    def forward(self, img: Tensor, labels: Tensor):
        """Forward pass of the Discriminator.

        Args:
            img: the images that should be classified as fake or real.
            labels: the labels of the images, looked up in `label_embedding`.

        Returns:
            the discriminator score for each (image, label) pair.
        """
        d_in = torch.cat(
            (img.view(img.size(0), -1), self.label_embedding(labels)), -1)
        score = self.model(d_in)
        return score
Code example #17
File: models.py Project: prem2017/hackathon
    def forward(self, X: Tensor, initial_states=None):

        #if self.init_states is None:
        self.init_states = torch.zeros(
            self.gru_hidden_layers * self.num_directions,
            X.size(self.batch_index), self.hidden_dimensions)

        # self.init_states = self.init_states.to(util.device)

        # TODO
        if X.shape[self.batch_index] != self.init_states.shape[1]:
            pass

        #
        output_gru, initial_states = self.gru_encoder(X, self.init_states)

        # TODO: if batchnorm handle differently
        # if self.use_batchnorm:
        # 	pass

        # TODO: if birdirectional handle differently [Note?]: This task should not need bidirectional RNN
        # Remember that initial states will be [(self.gru_hidden_layers * self.num_directions) x (X.shape[self.batch_index]) x (self.hidden_dimensions)]
        #
        # initial_states[-self.num_directions:, :, :] # output_gru[:,-1, :self.hidden_dimensions].view(1, -1, self.hidden_dimensions)
        return output_gru[:, -1, :self.hidden_dimensions].view(
            1, -1, self.hidden_dimensions)  # initial_states[-1, :, :]
Code example #18
    def forward(self, input: Tensor, mask: Tensor = None) -> Tensor:
        """
        Args:
            input: Tensor
                the input tensor with shape = [batch, length, input_size]
            mask: Tensor or None
                the mask tensor with shape = [batch, length]

        Returns: Tensor
            the energy tensor with shape = [batch, length, num_label, num_label]

        """
        batch, length, _ = input.size()

        # compute out_s by tensor dot [batch, length, input_size] * [input_size, num_label]
        # thus out_s should be [batch, length, num_label] --> [batch, length, 1, num_label]
        out_s = self.state_nn(input)

        if mask is not None:
            out_s[:, :, self.index_eos] += (mask == 0).float() * 2e4

        # [batch, length, num_label, num_label]
        output = self.trans_matrix + out_s.unsqueeze(2)

        return output
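The broadcasting in the final step is worth spelling out: a [num_label, num_label] transition matrix is added to the per-position state scores expanded to [batch, length, 1, num_label], giving the full energy tensor. A standalone shape check:

    import torch

    trans_matrix = torch.zeros(5, 5)  # [num_label, num_label]
    out_s = torch.zeros(2, 7, 5)      # [batch, length, num_label]
    output = trans_matrix + out_s.unsqueeze(2)
    print(output.shape)               # torch.Size([2, 7, 5, 5])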
Code example #19
def biLSTM(all_corpus: list, model, d=None) -> Tuple[Tensor, object]:
    if d is None:
        d = D('cpu')
    else:
        d = D('cuda')
    MatrixRet = []
    size = 0
    totalLength = len(all_corpus)
    cmtt = 0

    for topic in all_corpus:
        word_list_vec = []
        for news in topic:
            if size != 0:
                break
            for sentence in news:
                if size != 0:
                    break
                word_list = sentence.split(' ')
                for word in word_list:
                    if word != '' and word != '\n' and word != ' ' and word != '  ':
                        vec = model[word]
                        word_list_vec.append(word)
                        size = len(vec)
                        break
        # word_list_vec = FT(word_list_vec)
        MatrixRet.append(word_list_vec)
        cmtt = cmtt + 1
        logging.info('\t * News: %d/%d' % (cmtt, totalLength))
    ret1st = runModel(MatrixRet, size, size, d, model)
    return (Tensor([0,0]), ret1st)
Code example #20
    def forward(self, input: Tensor, mask: Tensor = None, hx: Tuple[Tensor, Tensor] = None) -> Tuple[Tensor, Tensor]:
        batch_size = input.size(0) if self.batch_first else input.size(1)
        if hx is None:
            num_directions = 2 if self.bidirectional else 1
            hx = input.new_zeros((self.num_layers * num_directions, batch_size, self.hidden_size))
            hx = (hx, hx)

        func = rnn_f.autograd_var_masked_rnn(num_layers=self.num_layers,
                                             batch_first=self.batch_first,
                                             bidirectional=self.bidirectional,
                                             lstm=True)

        self.reset_noise(batch_size)

        output, hidden = func(input, self.all_cells, hx, None if mask is None else mask.view(mask.size() + (1,)))
        return output, hidden
Code example #21
    def forward(self, x: Tensor) -> Tensor:
        x = self.conv(x)
        x = x.view(-1, 512 * 2 * 2)

        x = self.linear(x)

        return x
Code example #22
 def predict_recursively(preds: Tensor, energy: Tensor,
                         offset: int) -> NestedSequenceLabel:
     length = preds.size(0)
     nested_preds_list = []
     index = 0
     while index < length:
         id = preds[index]
         if id == eos_id:
             break
         if id != o_id:
             if id == b_id:  # B-XXX
                 start_tmp = index
                 index += 1
                 if index == length:
                     break
                 id = preds[index]
                 while id == i_id:  # I-XXX
                     index += 1
                     if index == length:
                         break
                     id = preds[index]
                 if id == e_id:  # E-XXX
                     end_tmp = index + 1
                     nested_preds = decode_nest(
                         energy[start_tmp:end_tmp, :, :])
                     nested_preds_list.append(
                         predict_recursively(
                             nested_preds,
                             energy[start_tmp:end_tmp, :, :],
                             start_tmp + offset))
         index += 1
     return NestedSequenceLabel(offset, length + offset, preds,
                                nested_preds_list)
Code example #23
 def forward(self, logits: Tensor, mask: Tensor) -> Tensor:
     """Adds the loss functions, weighted by the prefactor."""
     loss = logits.new_zeros(())
     for loss_fn, prefact in self.loss_fns.items():
         if prefact != 0:
             loss += prefact * loss_fn(logits, mask)
     return loss
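A minimal sketch of driving such a weighted-sum container; the dict of callables mirrors how self.loss_fns is iterated above, though its construction here is purely illustrative:

    import torch

    logits, mask = torch.randn(2, 3), torch.ones(2, 3)
    # hypothetical loss terms keyed to their prefactors
    loss_fns = {(lambda lg, m: lg.pow(2).mean()): 1.0,
                (lambda lg, m: lg.abs().mean()): 0.5}
    loss = logits.new_zeros(())
    for loss_fn, prefact in loss_fns.items():
        if prefact != 0:
            loss += prefact * loss_fn(logits, mask)
    print(loss)  # 1.0 * squared-error term + 0.5 * absolute-error term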
Code example #24
    def loss(self,
             input: Tensor,
             target: Tensor,
             mask: Tensor = None) -> Tuple[Tensor, Tensor]:
        """
        Args:
            input: Tensor
                the input tensor with shape = [batch, length, input_size]
            target: Tensor
                the tensor of target labels with shape [batch, length]
            mask: Tensor or None
                the mask tensor with shape = [batch, length]

        Returns: Tuple[Tensor, Tensor]
                A 1D tensor of minus log likelihood losses, and the energy tensor
        """
        batch, length, _ = input.size()
        energy = self.forward(input, mask=mask)
        # shape = [length, batch, num_label, num_label]
        energy_transpose = energy.transpose(0, 1)
        # shape = [length, batch]
        target_transpose = target.transpose(0, 1)

        # shape = [batch, num_label]
        partition = None

        # shape = [batch]
        batch_index = torch.arange(0, batch).type_as(input).long()
        prev_label = input.new_full((batch, ), self.index_bos).long()
        tgt_energy = input.new_zeros(batch)

        for t in range(length):
            # shape = [batch, num_label, num_label]
            curr_energy = energy_transpose[t]
            if t == 0:
                partition = curr_energy[:, self.index_bos, :]
            else:
                # shape = [batch, num_label]
                partition = logsumexp(curr_energy + partition.unsqueeze(2),
                                      dim=1)
            label = target_transpose[t]
            tgt_energy += curr_energy[batch_index, prev_label, label]
            prev_label = label

        return logsumexp(
            self.trans_matrix.data[:, self.index_eos].unsqueeze(0) + partition,
            dim=1) - tgt_energy, energy
Code example #25
def slice_last_dim(d: Tensor, length: int = 160) -> Tensor:
    """
    Slice last dimention if length is too much.
    If input is shorter than `length`, error is thrown.
    [..., L>160] => [..., L==160]
    """
    start = torch.randint(0, d.size()[-1] - (length - 1), (1, )).item()
    return torch.narrow(d, -1, start, length)
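For instance, a 200-sample signal is randomly cropped to 160 samples; the crop start varies between calls:

    import torch

    d = torch.arange(200.0)
    out = slice_last_dim(d, length=160)
    print(out.shape)  # torch.Size([160])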
Code example #26
    def predict(self, inputs):
        """
        Predicts with the model based on the given input feature values
        :param inputs: array
            Input feature values
        :return: (Tensor, Tensor, Tensor)
            Indices for grapheme_root, vowel_diacritic, consonant_diacritic
        """
        inputs = Tensor(inputs)
        inputs = inputs.to(self._device)
        grapheme_root_hat, vowel_diacritic_hat, consonant_diacritic_hat = self(inputs)

        _, grapheme_root_indices = grapheme_root_hat.max(1)
        _, vowel_diacritic_indices = vowel_diacritic_hat.max(1)
        _, consonant_diacritic_indices = consonant_diacritic_hat.max(1)

        return grapheme_root_indices, vowel_diacritic_indices, consonant_diacritic_indices
Code example #27
def read_image(image_path):
    """读出图片数据"""
    img = transform(cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_RGB2BGR))
    img = np.reshape(img, (3, 32, 32))
    # debug: 输出图片
    # img = to_pil_image(img)
    # img.show()
    return Tensor(img).unsqueeze(0)
Code example #28
    def predict_array(self,
                      arr: Union[np.ndarray, Tuple[np.ndarray, np.ndarray]],
                      n_models: Optional[int] = None,
                      parent_bar: Optional[master_bar] = None,
                      display: bool = True,
                      callbacks: Optional[List[AbsCallback]] = None,
                      bs: Optional[int] = None) -> np.ndarray:
        r'''
        Apply ensemble to Numpy array and get predictions. If an output pipe has been added to the ensemble, then the predictions will be deprocessed.
        Inputs are expected to be preprocessed; i.e. any input pipe added to the ensemble is not used.

        Arguments:
            arr: input data
            n_models: number of models to use in predictions as ranked by the metric which was used when constructing the :class:`~lumin.nn.ensemble.ensemble.Ensemble`.
                By default, entire ensemble is used.
            parent_bar: not used when calling the method directly
            display: whether to display a progress bar for model evaluations
            callbacks: list of any callbacks to use during evaluation
            bs: if not `None`, will run prediction in batches of the specified size to save memory

        Returns:
            Numpy array of predictions

        Examples::
            >>> preds = ensemble.predict_array(inputs)
        '''

        n_models = len(self.models) if n_models is None else n_models
        models = self.models[:n_models]
        weights = self.weights[:n_models]
        weights = weights / weights.sum()

        if isinstance(arr, tuple):
            arr = (to_device(Tensor(arr[0])), to_device(Tensor(arr[1])))
            pred = np.zeros((len(arr[0]), self.n_out))
        else:
            arr = to_device(Tensor(arr))
            pred = np.zeros((len(arr), self.n_out))

        for i, m in enumerate(
                progress_bar(models, parent=parent_bar, display=display)):
            tmp_pred = m.predict(arr, callbacks=callbacks, bs=bs)
            if self.output_pipe is not None:
                tmp_pred = self.output_pipe.inverse_transform(Xt=tmp_pred)
            pred += weights[i] * tmp_pred
        return pred
Code example #29
File: functional.py Project: ropas/pytea
def eye(n, m=None, out=None, dtype=None, **kwargs):
    if dtype is None:
        dtype = torch.floatDefault
    if m is None:
        m = n
    tensor = Tensor(n, m, dtype=dtype)
    LibCall.torch.copyOut(tensor, out)
    return tensor
Code example #30
    def calc_loss(self, q_values: Tensor, target_q_values: Tensor,
                  actions: Tensor, rewards: Tensor,
                  done_mask: Tensor) -> Tensor:
        """
        Calculate the MSE loss of this step.
        The loss for an example is defined as:
            Q_samp(s) = r if done
                        = r + gamma * max_a' Q_target(s', a') otherwise
            loss = (Q_samp(s) - Q(s, a))^2

        Args:
            q_values: (torch tensor) shape = (batch_size, num_actions)
                The Q-values that your current network estimates (i.e. Q(s, a') for all a')
            target_q_values: (torch tensor) shape = (batch_size, num_actions)
                The Target Q-values that your target network estimates (i.e. Q_target(s', a') for all a')
            actions: (torch tensor) shape = (batch_size,)
                The actions that you actually took at each step (i.e. a)
            rewards: (torch tensor) shape = (batch_size,)
                The rewards that you actually got at each step (i.e. r)
            done_mask: (torch tensor) shape = (batch_size,)
                A boolean mask of examples where we reached the terminal state

        Hint:
            You may find the following functions useful
                - torch.max
                - torch.sum
                - torch.nn.functional.one_hot
                - torch.nn.functional.mse_loss
        """
        # you may need this variable
        num_actions = self.env.action_space.n
        gamma = self.config.gamma

        ##############################################################
        ##################### YOUR CODE HERE - 3-5 lines #############
        notdone = 1 - done_mask.to(torch.int64)
        # one-hot mask keeps only Q(s, a) for the taken action; sum (rather
        # than max) is used so that negative Q-values are not clobbered by the
        # mask's zeros
        current_q = torch.sum(
            q_values *
            torch.nn.functional.one_hot(actions.to(torch.int64), num_actions),
            dim=1)
        target_q = rewards + notdone * gamma * torch.max(target_q_values,
                                                         1).values

        loss = torch.nn.functional.mse_loss(current_q, target_q)
        return loss