Example #1
def softmax(x, axis=None):
    """
    Softmax function, defined as `softmax(x) = np.exp(x) / np.sum(np.exp(x))`.

    Args:
        x (numpy.ndarray): Input array.
        axis (Union[int, tuple[int]]): Axis to compute values along. Default: None.

    Returns:
        numpy.ndarray, with the same shape as `x`.
    """
    from scipy.special import softmax as scipy_softmax
    return scipy_softmax(x, axis=axis)
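
A minimal usage sketch for the wrapper above (the input values and the `numpy` import are illustrative assumptions, not part of the original example):

import numpy as np

logits = np.array([[1.0, 2.0, 3.0],
                   [0.0, 0.0, 0.0]])
probs = softmax(logits, axis=-1)   # same shape as the input
print(probs.sum(axis=-1))          # [1. 1.], each row sums to 1
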
Example #2
    def _create_gamut_instances(self, gamut_probabilities_rebin):
        self.gamut_instances = list(
            zip(*np.where(gamut_probabilities_rebin > 0.0)))
        self.gamut_instances = sorted(
            self.gamut_instances,
            key=lambda t: gamut_probabilities_rebin[t[0], t[1]])

        if len(self.gamut_instances) < self.GAMUT_SIZE:
            warnings.warn(
                f"GAMUT_SIZE reduced to {len(self.gamut_instances)} because of the gamut probabilities"
            )
            self.GAMUT_SIZE = len(self.gamut_instances)

        nb_to_remove = len(self.gamut_instances) - self.GAMUT_SIZE
        for _ in range(nb_to_remove):
            self.gamut_instances.pop(0)

        # Softmax over the retained (rebinned) probabilities gives a normalized
        # distribution across the kept gamut instances.
        self.instance_probabilities = scipy_softmax(
            np.array([
                gamut_probabilities_rebin[t] for t in self.gamut_instances
            ]))

        return self.gamut_instances
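
The instance probabilities computed above are a softmax over the retained gamut bins; a standalone sketch of that step with a made-up probability grid (the grid values and the sampling call are assumptions for illustration):

import numpy as np
from scipy.special import softmax as scipy_softmax

gamut_probabilities_rebin = np.array([[0.0, 0.2],
                                      [0.5, 0.3]])
instances = list(zip(*np.where(gamut_probabilities_rebin > 0.0)))
weights = scipy_softmax(np.array([gamut_probabilities_rebin[t] for t in instances]))
# weights sums to 1, so it can be used directly as sampling probabilities
idx = np.random.choice(len(instances), p=weights)
print(instances[idx], weights)
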
Example #3
    def predict_labels(self,
                       data_generator,
                       combine_before_softmax=False,
                       weights=None):
        """Combine class predictions from multiple models by averaging before softmax.

        Parameters
        ----------
        data_generator : DataGenerator object
            data generator to serve data batches
        combine_before_softmax : bool, optional
            True to combine logits across models before taking softmax; False to take softmax for
            each model then combine probabilities
        weights : array-like, str, or NoneType, optional
            array-like: weight for each model
            str: 'entropy': weight each model at each time point by inverse entropy of distribution
            None: uniform weight for each model

        Returns
        -------
        dict
            - 'labels' (list of lists): corresponding labels

        """

        # initialize container for labels
        labels = [[] for _ in range(data_generator.n_datasets)]
        for sess, dataset in enumerate(data_generator.datasets):
            labels[sess] = [np.array([]) for _ in range(dataset.n_sequences)]

        # process data for each model
        labels_all = []
        for model in self.models:
            outputs_curr = model.predict_labels(
                data_generator, return_scores=combine_before_softmax)
            if combine_before_softmax:
                labels_all.append(outputs_curr['scores'])
            else:
                labels_all.append(outputs_curr['labels'])
        # labels_all is a list of list of lists
        # access: labels_all[idx_model][idx_dataset][idx_batch]

        # ensemble prediction across models
        for sess, labels_sess in enumerate(labels):
            for batch, labels_batch in enumerate(labels_sess):

                # labels_curr is of shape (n_models, sequence_len, n_classes)
                labels_curr = np.vstack(
                    [l[sess][batch][None, ...] for l in labels_all])

                # combine predictions across models
                if weights is None:
                    # simple average across models
                    labels_curr = np.mean(labels_curr, axis=0)
                elif isinstance(weights, str) and weights == 'entropy':
                    # weight each model at each time point by inverse entropy of distribution
                    # so that more confident models have a higher weight
                    # compute entropy across labels
                    ent = entropy(labels_curr, axis=-1)
                    # low entropy = high confidence, weight these more
                    w = 1.0 / ent
                    # normalize over models
                    w /= np.sum(w, axis=0)  # shape of (n_models, sequence_len)
                    # weights already sum to one across models, so take a weighted sum
                    labels_curr = np.sum(labels_curr * w[..., None], axis=0)
                elif isinstance(weights, (list, tuple, np.ndarray)):
                    # weight each model according to user-supplied weights
                    labels_curr = np.average(labels_curr,
                                             axis=0,
                                             weights=weights)

                if combine_before_softmax:
                    labels[sess][batch] = scipy_softmax(labels_curr, axis=-1)
                else:
                    labels[sess][batch] = labels_curr

        return {'labels': labels}
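
A small standalone sketch of the entropy-based weighting described in the docstring, using synthetic per-model probabilities (the shapes and the Dirichlet sampling are assumptions made for illustration):

import numpy as np
from scipy.stats import entropy

# two models, three time points, four classes
labels_curr = np.random.dirichlet(np.ones(4), size=(2, 3))
ent = entropy(labels_curr, axis=-1)   # (n_models, sequence_len)
w = 1.0 / ent                         # low entropy -> high confidence -> larger weight
w /= np.sum(w, axis=0)                # normalize over models
combined = np.sum(labels_curr * w[..., None], axis=0)
print(combined.sum(axis=-1))          # each time point sums to 1
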
Example #4
def softmax(_input, der=False):
    # The derivative branch is not implemented; the forward pass is returned in either case.
    if der:
        pass
    return scipy_softmax(_input, axis=1)
Example #5
# h_real = np.load('/Users/yh9277/Dropbox/ML Beam Alignment/Data/H_Matrices FineGrid/MISO_Static_FineGrid_Hmatrices_real.npy')
# h_imag = np.load('/Users/yh9277/Dropbox/ML Beam Alignment/Data/H_Matrices FineGrid/MISO_Static_FineGrid_Hmatrices_imag.npy')

h = h_real + 1j * h_imag
# norm_factor = np.max(np.power(abs(h),2))
norm_factor = np.max(abs(h))
h_scaled = h / norm_factor
h_concat_scaled = np.concatenate((h_real / norm_factor, h_imag / norm_factor),
                                 axis=1)

# Beamforming gain of each DFT codebook beam; the best-beam index is the hard
# label and a softmax over the gains gives a soft target distribution.
target_hard = np.argmax(np.power(
    np.absolute(np.matmul(h_scaled,
                          DFT_codebook(n_beam, n_antenna).conj().T)), 2),
                        axis=1)
target_softmax = scipy_softmax(np.power(
    np.absolute(np.matmul(h_scaled,
                          DFT_codebook(n_beam, n_antenna).conj().T)), 2),
                               axis=1)

train_idc, test_idc = train_test_split(np.arange(h.shape[0]), test_size=0.4)
val_idc, test_idc = train_test_split(test_idc, test_size=0.5)

x_train, y_train = h_concat_scaled[train_idc, :], target_hard[train_idc]
x_val, y_val = h_concat_scaled[val_idc, :], target_hard[val_idc]
x_test, y_test = h_concat_scaled[test_idc, :], target_hard[test_idc]

torch_x_train = torch.from_numpy(x_train)
torch_y_train = torch.from_numpy(y_train)
torch_x_val = torch.from_numpy(x_val)
torch_y_val = torch.from_numpy(y_val)
torch_x_test = torch.from_numpy(x_test)
torch_y_test = torch.from_numpy(y_test)
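
A minimal sketch of the hard/soft label construction used above, with a made-up gain matrix standing in for the channel-codebook products (values are illustrative only):

import numpy as np
from scipy.special import softmax as scipy_softmax

gains = np.array([[0.1, 0.9, 0.3, 0.2],
                  [0.5, 0.4, 0.8, 0.1],
                  [0.2, 0.2, 0.2, 0.7]])   # (n_samples, n_beams)
hard = np.argmax(gains, axis=1)
soft = scipy_softmax(gains, axis=1)
# softmax is monotonic, so the soft labels peak at the hard label
assert np.array_equal(np.argmax(soft, axis=1), hard)
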
Example #6
h_imag = np.load('D://Github Repositories/mmWave Beam Management/H_Matrices FineGrid/MISO_Static_FineGrid_Hmatrices_imag.npy')[:,antenna_sel]
loc = np.load('D://Github Repositories/mmWave Beam Management/H_Matrices FineGrid/MISO_Static_FineGrid_UE_location.npy')
# h_real = np.load('/Users/yh9277/Dropbox/ML Beam Alignment/Data/H_Matrices FineGrid/MISO_Static_FineGrid_Hmatrices_real.npy')
# h_imag = np.load('/Users/yh9277/Dropbox/ML Beam Alignment/Data/H_Matrices FineGrid/MISO_Static_FineGrid_Hmatrices_imag.npy')

h = h_real + 1j * h_imag
# norm_factor = np.max(np.power(abs(h),2))
norm_factor = np.max(abs(h))
h_scaled = h / norm_factor
h_concat_scaled = np.concatenate((h_real / norm_factor, h_imag / norm_factor),
                                 axis=1)

# target_codebook = DFT_codebook(n_beam,n_antenna)
target_codebook = DFT_beam(n_antenna, [np.pi / 8, np.pi / 4])

target_hard = np.argmax(np.power(
    np.absolute(np.matmul(h_scaled, target_codebook.conj().T)), 2), axis=1)
target_softmax = scipy_softmax(np.power(
    np.absolute(np.matmul(h_scaled, target_codebook.conj().T)), 2), axis=1)
# target_hard = target_hard > 1
# target_hard = target_hard.astype(int)

train_idc, test_idc = train_test_split(np.arange(h.shape[0]), test_size=0.4)
val_idc, test_idc = train_test_split(test_idc, test_size=0.5)

x_train, y_train = h_concat_scaled[train_idc, :], target_hard[train_idc]
x_val, y_val = h_concat_scaled[val_idc, :], target_hard[val_idc]
x_test, y_test = h_concat_scaled[test_idc, :], target_hard[test_idc]

torch_x_train = torch.from_numpy(x_train)
torch_y_train = torch.from_numpy(y_train)
torch_x_val = torch.from_numpy(x_val)
torch_y_val = torch.from_numpy(y_val)