Example 1
    def __init__(self, num_features, init_method, init_alpha=1.,
                 is_beta_used=True):
        super(ParametricNet, self).__init__()

        # weights
        W = torch.randn(num_features, 1) 
        self.W = opt.initialization(init_method, W)

        # alpha parameter, initialized to 1 / init_alpha
        one = torch.FloatTensor(np.array([1.])) / init_alpha
        self.alpha = torch.nn.Parameter(one)

        self.is_beta_used = is_beta_used
        if self.is_beta_used:
            # beta parameter, initialized just above 1 / init_alpha
            one = torch.FloatTensor(np.array([1.001])) / init_alpha
            self.beta = torch.nn.Parameter(one)
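
A minimal usage sketch for the constructor above. It assumes the rest of the ParametricNet class and pysurvival's optimization helpers (imported as opt) are available, and that opt.initialization returns a torch.nn.Parameter of the same shape; the forward computation of the actual model is not shown here.

import torch

net = ParametricNet(num_features=3, init_method='glorot_normal',
                    init_alpha=2., is_beta_used=True)

# The module now exposes three learnable parameters:
#   net.W      -- (num_features, 1) weights of the linear risk score
#   net.alpha  -- scalar, initialized to 1 / init_alpha
#   net.beta   -- scalar, initialized to 1.001 / init_alpha (only if is_beta_used)
x = torch.randn(16, 3)                        # batch of 16 samples
risk_score = torch.mm(x, net.W).squeeze(-1)   # linear risk score <x, W> per sample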
Example 2
    def fit(self,
            X,
            T,
            E,
            init_method='glorot_normal',
            lr=1e-2,
            max_iter=100,
            l2_reg=1e-2,
            alpha=0.95,
            tol=1e-3,
            verbose=True):
        """
        Fitting a proportional hazards regression model using Efron's
        approximation method to account for tied event times.

        As the Hessian matrix of the log-likelihood can be
        calculated without too much effort, the model parameters are
        computed using the Newton-Raphson optimization scheme:
                W_new = W_old - lr * <Hessian^(-1), gradient>
        (a standalone NumPy sketch of this update step follows this example)
        
        Arguments:
        ---------
        * `X` : **array-like**, *shape=(n_samples, n_features)* --
            The input samples.

        * `T` : **array-like** -- 
            The target values describing when the event of interest or 
            censoring occurred.

        * `E` : **array-like** --
            The values that indicate if the event of interest occurred 
            i.e.: E[i]=1 corresponds to an event, and E[i] = 0 means censoring, 
            for all i.

        * `init_method` : **str** *(default = 'glorot_normal')* -- 
            Initialization method to use. Here are the possible options:

            * `glorot_uniform`: Glorot/Xavier uniform initializer
            * `he_uniform`: He uniform variance scaling initializer
            * `uniform`: Initializing tensors with a uniform (-1, 1) distribution
            * `glorot_normal`: Glorot normal initializer
            * `he_normal`: He normal initializer
            * `normal`: Initializing tensors with a standard normal distribution
            * `ones`: Initializing tensors to 1
            * `zeros`: Initializing tensors to 0
            * `orthogonal`: Initializing tensors with an orthogonal matrix
            
        * `lr`: **float** *(default=1e-2)* -- 
            The learning rate used in the optimization

        * `max_iter`: **int** *(default=100)* -- 
            The maximum number of iterations in the Newton optimization

        * `l2_reg`: **float** *(default=1e-2)* -- 
            L2 regularization parameter for the model coefficients

        * `alpha`: **float** *(default=0.95)* -- 
            Confidence level used for the confidence intervals reported 
            in the model summary

        * `tol`: **float** *(default=1e-3)* -- 
            Tolerance for stopping criteria

        * `verbose`: **bool** *(default=True)* -- 
            Whether or not to produce detailed logging during model fitting
 
        Example:
        --------

        #### 1 - Importing packages
        import numpy as np
        import pandas as pd
        from matplotlib import pyplot as plt
        from sklearn.model_selection import train_test_split
        from pysurvival.models.simulations import SimulationModel
        from pysurvival.models.semi_parametric import CoxPHModel
        from pysurvival.utils.metrics import concordance_index
        from pysurvival.utils.display import integrated_brier_score
        #%pylab inline  # To use with Jupyter notebooks


        #### 2 - Generating the dataset from a Log-Logistic parametric model
        # Initializing the simulation model
        sim = SimulationModel( survival_distribution = 'log-logistic',  
                               risk_type = 'linear',
                               censored_parameter = 10.1, 
                               alpha = 0.1, beta=1.2 )

        # Generating N random samples 
        N = 1000
        dataset = sim.generate_data(num_samples = N, num_features = 3)

        #### 3 - Creating the modeling dataset
        # Defining the features
        features = sim.features

        # Building training and testing sets #
        index_train, index_test = train_test_split( range(N), test_size = 0.2)
        data_train = dataset.loc[index_train].reset_index( drop = True )
        data_test  = dataset.loc[index_test].reset_index( drop = True )

        # Creating the X, T and E input
        X_train, X_test = data_train[features], data_test[features]
        T_train, T_test = data_train['time'].values, data_test['time'].values
        E_train, E_test = data_train['event'].values, data_test['event'].values


        #### 4 - Creating an instance of the Cox PH model and fitting the data.
        # Building the model
        coxph = CoxPHModel()
        coxph.fit(X_train, T_train, E_train, lr=0.5, l2_reg=1e-2, 
            init_method='zeros')


        #### 5 - Cross Validation / Model Performances
        c_index = concordance_index(coxph, X_test, T_test, E_test) #0.92
        print('C-index: {:.2f}'.format(c_index))

        ibs = integrated_brier_score(coxph, X_test, T_test, E_test, t_max=10, 
                    figure_size=(20, 6.5) )

        References:
        -----------
        * https://en.wikipedia.org/wiki/Proportional_hazards_model#Tied_times
        * Efron, Bradley (1977). "The Efficiency of Cox's Likelihood 
          Function for Censored Data". Journal of the American Statistical 
          Association. 72 (359): 557-565. 
        """

        # Collecting the feature names
        N, self.num_vars = X.shape
        if isinstance(X, pd.DataFrame):
            self.variables = X.columns.tolist()
        else:
            self.variables = ['x_{}'.format(i) for i in range(self.num_vars)]

        # Checking the format of the data
        X, T, E = utils.check_data(X, T, E)

        # Sorting the samples by descending time and standardizing the features
        order = np.argsort(-T)
        T = T[order]
        E = E[order]
        X = self.scaler.fit_transform(X[order, :])
        self.std_scale = np.sqrt(self.scaler.var_)

        # Initializing the model
        self.model = _CoxPHModel()

        # Creating the time axis
        self.model.get_times(T, E)

        # Initializing the parameters
        W = np.zeros(self.num_vars)
        W = opt.initialization(init_method, W, False).flatten()
        W = W.astype(np.float64)

        # Optimizing to find best parameters
        epsilon = 1e-9
        self.model.newton_optimization(X, T, E, W, lr, l2_reg, tol, epsilon,
                                       max_iter, verbose)

        # Saving the Cython attributes in the Python object
        self.weights = np.array(self.model.W)
        self.loss = self.model.loss
        self.times = np.array(self.model.times)
        self.gradient = np.array(self.model.gradient)
        self.Hessian = np.array(self.model.Hessian)
        self.inv_Hessian = np.array(self.model.inv_Hessian)
        self.loss_values = np.array(self.model.loss_values)
        self.grad2_values = np.array(self.model.grad2_values)

        # Computing baseline functions
        score = np.exp(np.dot(X, self.weights))
        baselines = _baseline_functions(score, T, E)

        # Saving the baseline functions in the Python object
        self.baseline_hazard = np.array(baselines[1])
        self.baseline_survival = np.array(baselines[2])
        del self.model
        self.get_time_buckets()

        # Calculating summary
        self.get_summary(alpha)

        return self
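
A minimal NumPy sketch of the Newton-Raphson update described in the docstring above. The gradient and hessian callables are generic placeholders, not pysurvival's Efron-likelihood terms, and the quadratic toy loss is only for illustration.

import numpy as np

def newton_raphson(gradient, hessian, W, lr=1., tol=1e-3, max_iter=100):
    """Generic Newton-Raphson loop: W_new = W_old - lr * H^{-1} g."""
    for _ in range(max_iter):
        g = gradient(W)                    # gradient of the loss at W
        H = hessian(W)                     # Hessian of the loss at W (p x p)
        step = np.linalg.solve(H, g)       # H^{-1} g without forming the inverse
        W = W - lr * step
        if np.linalg.norm(step) < tol:     # stop once the update is negligible
            break
    return W

# Toy check on the quadratic loss 0.5 * ||A W - b||^2 (converges in one step for lr=1)
A = np.random.randn(50, 3)
b = np.random.randn(50)
W_hat = newton_raphson(lambda W: A.T @ (A @ W - b),
                       lambda W: A.T @ A,
                       W=np.zeros(3))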
Example 3
    def __init__(self,
                 input_size,
                 output_size,
                 structure,
                 init_method,
                 batch_normalization=True,
                 bn_and_droupout=False):

        # Initializing the model
        super(NeuralNet, self).__init__()

        # Initializing the list of layers
        self.layers = []

        if structure is not None and structure != []:

            # Checking if structure is dict
            if isinstance(structure, dict):
                structure = [structure]

            # Building the hidden layers
            for hidden in structure:

                # Extracting the hidden layer parameters
                hidden_size = int(hidden.get('num_units'))
                activation = hidden.get('activation')
                alpha = hidden.get('alpha')
                dropout = hidden.get('dropout')

                # Fully connected layer
                fully_conn = nn.Linear(input_size, hidden_size)
                fully_conn.weight = opt.initialization(init_method,
                                                       fully_conn.weight)
                fully_conn.bias = opt.initialization(init_method,
                                                     fully_conn.bias)
                self.layers.append(fully_conn)

                if not bn_and_droupout:
                    # Batch Normalization
                    if batch_normalization:
                        self.layers.append(torch.nn.BatchNorm1d(hidden_size))

                    # Activation
                    self.layers.append(
                        activation_function(activation, alpha=alpha))

                    # Dropout (only applied when batch normalization is off)
                    if (dropout is not None and 0. < dropout <= 1.
                            and not batch_normalization):
                        self.layers.append(torch.nn.Dropout(dropout))

                else:
                    # Batch Normalization
                    if batch_normalization:
                        self.layers.append(torch.nn.BatchNorm1d(hidden_size))

                    # Activation
                    self.layers.append(
                        activation_function(activation, alpha=alpha))

                    # Dropout
                    if dropout is not None and 0. < dropout <= 1.:
                        self.layers.append(torch.nn.Dropout(dropout))

                # Next layer
                input_size = hidden_size

        # Fully connected last layer
        fully_conn = nn.Linear(input_size, output_size)
        fully_conn.weight = opt.initialization(init_method, fully_conn.weight)
        fully_conn.bias = opt.initialization(init_method, fully_conn.bias)
        self.layers.append(fully_conn)

        # Putting the model together
        self.model = nn.Sequential(*self.layers).train()
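
A minimal usage sketch, assuming this NeuralNet class (and the opt and activation_function helpers it relies on) is importable. The structure format, a list of dicts with 'num_units', 'activation', 'alpha' and 'dropout' keys, mirrors what the constructor above reads; the concrete values and activation names are illustrative assumptions.

import torch

# Two hidden layers, each described by a dict of hyper-parameters
structure = [
    {'num_units': 64, 'activation': 'ReLU', 'alpha': 0., 'dropout': 0.2},
    {'num_units': 32, 'activation': 'ReLU', 'alpha': 0., 'dropout': 0.2},
]

net = NeuralNet(input_size=10, output_size=1, structure=structure,
                init_method='glorot_normal', batch_normalization=True)

# nn.Sequential makes the stacked layers directly callable
out = net.model(torch.randn(8, 10))   # -> tensor of shape (8, 1)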
Example 4
    def fit(self,
            X,
            T,
            E,
            with_bias=True,
            init_method='glorot_normal',
            lr=1e-2,
            max_iter=100,
            l2_reg=1e-4,
            tol=1e-3,
            verbose=True):
        """
        Fitting a Survival Support Vector Machine model.

        As the Hessian matrix of the log-likelihood can be 
        calculated without too much effort, the model parameters are 
        computed using the Newton-Raphson optimization scheme:
                W_new = W_old - lr*<Hessian^(-1), gradient>

        Arguments:
        ---------
        
        * `X` : array-like, shape=(n_samples, n_features)
            The input samples.

        * `T` : array-like, shape = [n_samples] 
            The target values describing when the event of interest or censoring
            occurred

        * `E` : array-like, shape = [n_samples] 
            The event indicator array such that E[i] = 1 if the event occurred
            and E[i] = 0 if censoring occurred

        * `with_bias`: bool (default=True)
            Whether a bias should be added 

        * `init_method` : str (default = 'glorot_normal')
            Initialization method to use. Here are the possible options:
                * 'glorot_uniform': Glorot/Xavier uniform initializer
                * 'he_uniform': He uniform variance scaling initializer
                * 'uniform': Initializing tensors with a uniform (-1, 1) distribution
                * 'glorot_normal': Glorot normal initializer
                * 'he_normal': He normal initializer
                * 'normal': Initializing tensors with a standard normal distribution
                * 'ones': Initializing tensors to 1
                * 'zeros': Initializing tensors to 0
                * 'orthogonal': Initializing tensors with an orthogonal matrix

        * `lr`: float (default=1e-2)
            The learning rate used in the optimization

        * `max_iter`: int (default=100)
            The maximum number of iterations in the Newton optimization

        * `l2_reg`: float (default=1e-4)
            L2 regularization parameter for the model coefficients

        * `tol`: float (default=1e-3)
            Tolerance for stopping criteria

        * `verbose`: bool (default=True)
            Whether or not to produce detailed logging during model fitting


        Example:
        --------

        #### 1 - Importing packages
        import numpy as np
        import pandas as pd
        from pysurvival.models.svm import LinearSVMModel
        from pysurvival.models.svm import KernelSVMModel
        from pysurvival.models.simulations import SimulationModel
        from pysurvival.utils.metrics import concordance_index
        from sklearn.model_selection import train_test_split
        from scipy.stats.stats import pearsonr   
        # %pylab inline # to use in jupyter notebooks

        #### 2 - Generating the dataset from the parametric model
        # Initializing the simulation model
        sim = SimulationModel( survival_distribution = 'Log-Logistic',  
                               risk_type = 'linear',
                               censored_parameter = 1.1, 
                               alpha = 1.5, beta = 4)

        # Generating N Random samples
        N = 1000
        dataset = sim.generate_data(num_samples = N, num_features = 4)

        #### 3 - Splitting the dataset into training and testing sets
        # Defining the features
        features = sim.features

        # Building training and testing sets #
        index_train, index_test = train_test_split( range(N), test_size = 0.2)
        data_train = dataset.loc[index_train].reset_index( drop = True )
        data_test  = dataset.loc[index_test].reset_index( drop = True )

        # Creating the X, T and E input
        X_train, X_test = data_train[features], data_test[features]
        T_train, T_test = data_train['time'].values, data_test['time'].values
        E_train, E_test = data_train['event'].values, data_test['event'].values


        #### 4 - Creating an instance of the SVM model and fitting the data.
        # Building the model: either the linear or the kernel SVM can be used;
        # the kernel model is the one fitted below.
        # svm_model = LinearSVMModel()
        svm_model = KernelSVMModel(kernel='Gaussian', scale=0.25)
        svm_model.fit(X_train, T_train, E_train, init_method='he_uniform', 
            with_bias = True, lr = 0.5,  tol = 1e-3,  l2_reg = 1e-3)

        #### 5 - Cross Validation / Model Performances
        c_index = concordance_index(svm_model, X_test, T_test, E_test) #0.93
        print('C-index: {:.2f}'.format(c_index))

        #### 6 - Comparing the model predictions to Actual risk score
        # Comparing risk scores
        svm_risks = svm_model.predict_risk(X_test)
        actual_risks = sim.predict_risk(X_test).flatten()
        print("corr={:.4f}, p_value={:.5f}".format(*pearsonr(svm_risks, 
            actual_risks)))# corr=-0.9992, p_value=0.00000

        """

        # Collecting the feature names
        N, self.num_vars = X.shape
        if isinstance(X, pd.DataFrame):
            self.variables = X.columns.tolist()
        else:
            self.variables = ['x_{}'.format(i) for i in range(self.num_vars)]

        # Adding a bias or not
        self.with_bias = with_bias
        if with_bias:
            self.variables += ['intercept']
        p = int(self.num_vars + 1. * with_bias)

        # Checking the format of the data
        X, T, E = utils.check_data(X, T, E)

        if with_bias:
            # Adding the intercept
            X = np.c_[X, [1.] * N]
        X = self.scaler.fit_transform(X)

        # Initializing the parameters: the linear model learns one weight per
        # feature (plus the optional bias), the kernel model one weight per
        # training sample (illustrated in the sketch after this example)
        if self.kernel_type == 0:
            W = np.zeros((p, 1))
        else:
            W = np.zeros((N, 1))
        W = opt.initialization(init_method, W, False).flatten()
        W = W.astype(np.float64)

        # Optimizing to find best parameters
        self.model.newton_optimization(X, T, E, W, lr, l2_reg, tol, max_iter,
                                       verbose)
        self.save_properties()

        return self
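
A brief NumPy illustration of why the linear and kernel variants above use differently shaped weight vectors: the linear score is an inner product with one weight per feature, while the kernel score weights a kernel evaluation against every training sample. The Gaussian kernel form below is a common choice shown for illustration only and is not necessarily pysurvival's exact parameterization.

import numpy as np

def gaussian_kernel(X1, X2, scale=0.25):
    # Pairwise kernel matrix K[i, j] = exp(-scale * ||X1[i] - X2[j]||^2)
    d2 = ((X1[:, None, :] - X2[None, :, :]) ** 2).sum(-1)
    return np.exp(-scale * d2)

N, p = 100, 4
X_train = np.random.randn(N, p)
X_new = np.random.randn(10, p)

W_linear = np.random.randn(p)      # linear model: one weight per feature
W_kernel = np.random.randn(N)      # kernel model: one weight per training sample

linear_score = X_new @ W_linear                            # <x, W>
kernel_score = gaussian_kernel(X_new, X_train) @ W_kernel  # sum_j W_j * K(x, x_j)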
Example 5
    def __init__(self,
                 input_size,
                 output_size,
                 structure,
                 init_method,
                 dropout=None,
                 batch_normalization=True,
                 bn_and_droupout=False):

        # Initializing the model
        super(NeuralNet, self).__init__()

        # Initializing the list of layers
        self.layers = []

        if structure is not None and structure != []:

            # Checking if structure is dict
            if isinstance(structure, dict):
                structure = [structure]

            # Building the hidden layers
            self.inputs = []
            for hidden in structure:
                output_size_ = 0
                if isinstance(hidden, list):
                    print("is list")
                    layers = []
                    for input_index, input in enumerate(hidden):
                        # Extracting the hidden layer parameter
                        activation = input.get('activation')
                        alpha = input.get('alpha')
                        input_layer = input.get("input", "")

                        if input.get("type") == "linear":
                            # Fully connected layer
                            hidden_size = int(input.get('num_units'))
                            fully_conn = nn.Linear(input_size[input_index],
                                                   hidden_size)
                            fully_conn.weight = opt.initialization(
                                init_method, fully_conn.weight)
                            fully_conn.bias = opt.initialization(
                                init_method, fully_conn.bias)
                            if input_layer:
                                self.inputs.append(fully_conn)
                            layers.append(fully_conn)
                            output_size_ += hidden_size

                        elif input.get("type") == "embedding":
                            num_embeddings = input.get("num_embeddings")
                            embedding_dim = input.get("embedding_dim")

                            fully_conn = nn.Embedding(num_embeddings,
                                                      embedding_dim)
                            fully_conn.weight = opt.initialization(
                                init_method, fully_conn.weight)
                            # fully_conn.bias = opt.initialization(init_method,
                            #                                     fully_conn.bias)
                            if input_layer:
                                self.inputs.append(fully_conn)
                            layers.append(fully_conn)
                            #layers.append(nn.Flatten())
                            output_size_ += embedding_dim * input_size[
                                input_index]
                    self.layers.append(layers)
                    hidden_size = output_size_

                else:
                    print("is not list")
                    # Extracting the hidden layer parameters

                    activation = hidden.get('activation')
                    alpha = hidden.get('alpha')
                    input_layer = hidden.get("input", "")

                    if hidden.get("type") == "linear":
                        # Fully connected layer
                        hidden_size = int(hidden.get('num_units'))
                        print(f"{input_size} | {hidden_size}")
                        if type(input_size) is int:
                            fully_conn = nn.Linear(input_size, hidden_size)
                        else:
                            fully_conn = nn.Linear(input_size[0], hidden_size)
                        fully_conn.weight = opt.initialization(
                            init_method, fully_conn.weight)
                        fully_conn.bias = opt.initialization(
                            init_method, fully_conn.bias)
                        if input_layer:
                            self.inputs.append(fully_conn)
                        self.layers.append(fully_conn)

                    elif hidden.get("type") == "embedding":
                        num_embeddings = hidden.get("num_embeddings")
                        embedding_dim = hidden.get("embedding_dim")

                        fully_conn = nn.Embedding(num_embeddings,
                                                  embedding_dim)
                        fully_conn.weight = opt.initialization(
                            init_method, fully_conn.weight)
                        #fully_conn.bias = opt.initialization(init_method,
                        #                                     fully_conn.bias)
                        if input_layer:
                            self.inputs.append(fully_conn)
                        self.layers.append(fully_conn)
                        hidden_size = input_size[0] * embedding_dim
                        #self.layers.append(nn.Flatten())

                if not bn_and_droupout:
                    # Batch Normalization
                    if batch_normalization:
                        self.layers.append(torch.nn.BatchNorm1d(hidden_size))

                    # Activation
                    self.layers.append(
                        activation_function(activation, alpha=alpha))

                    # Dropout
                    if (dropout is not None or 0. < dropout <= 1.) and \
                    not batch_normalization :
                        self.layers.append(torch.nn.Dropout(dropout))

                else:
                    # Batch Normalization
                    if batch_normalization:
                        self.layers.append(torch.nn.BatchNorm1d(hidden_size))

                    # Activation
                    self.layers.append(
                        activation_function(activation, alpha=alpha))

                    # Dropout
                    if dropout is not None and 0. < dropout <= 1.:
                        self.layers.append(torch.nn.Dropout(dropout))

                # Next layer
                input_size = hidden_size

        else:
            # No hidden layers: input_size is expected as a sequence here,
            # so its first element feeds the last layer directly
            input_size = input_size[0]

        # Fully connected last layer
        fully_conn = nn.Linear(input_size, output_size)
        fully_conn.weight = opt.initialization(init_method, fully_conn.weight)
        fully_conn.bias = opt.initialization(init_method, fully_conn.bias)
        self.layers.append(fully_conn)

        # Putting the model together: flattening the possibly nested list of
        # layers and registering them in a ModuleList
        flat_list = []
        for layer in self.layers:
            flat_list += recr_process(layer)

        self.model = nn.ModuleList(flat_list)
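
A minimal sketch of the structure format this constructor appears to expect, assuming NeuralNet and its helpers (opt, activation_function, recr_process) are importable from the module above. The key names 'type', 'num_units', 'num_embeddings', 'embedding_dim', 'input', 'activation' and 'alpha' are taken from the code; the concrete values and activation names are illustrative assumptions.

# Two parallel input branches (a linear branch and an embedding branch),
# followed by a single dense hidden layer.
structure = [
    [   # a list entry describes one sub-layer per input branch
        {'type': 'linear', 'num_units': 32, 'activation': 'ReLU',
         'alpha': 0., 'input': 'numeric'},
        {'type': 'embedding', 'num_embeddings': 100, 'embedding_dim': 8,
         'activation': 'ReLU', 'alpha': 0., 'input': 'categorical'},
    ],
    {'type': 'linear', 'num_units': 16, 'activation': 'ReLU', 'alpha': 0.},
]

# input_size is indexed per branch in the list case, so it is a sequence here:
# 5 numeric features for the linear branch, 3 categorical columns for the embedding.
net = NeuralNet(input_size=[5, 3], output_size=1, structure=structure,
                init_method='glorot_normal', dropout=0.2,
                batch_normalization=True)

# net.model is an nn.ModuleList here, so the forward pass (not shown) has to
# route each input branch through the layers registered in self.inputs.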