Example #1
0
 def init_ss_learner(self, learner=None, params=None):
     """Set up the two semi-supervised co-cluster learning models.

     Parameters
     ----------
     learner : str or None
         Learner identifier forwarded to ``learner_class``.
     params : dict or sequence of two dicts, optional
         Hyper-parameters for the learners. A single dict is shared by
         both models; a length-2 sequence supplies one dict per model.
         Defaults to ``{'alpha': 0.1}``.

     Side effects: sets ``self.ss_models``, ``self.cumsum_obj`` and
     ``self.obj_ss``.
     """
     # Fix for the mutable-default-argument pitfall: the old default
     # ``params={'alpha':0.1}`` was a single dict shared across calls.
     if params is None:
         params = {'alpha': 0.1}

     ###############################################################
     # Setup semi-supervised co-cluster learning models
     ###############################################################
     self.ss_models = np.empty((2,), dtype='object')  # For Dyadic data
     self.cumsum_obj = np.zeros((2,))
     # Per-side objective buffers: rows are (M, K), columns are (N, L).
     self.obj_ss = [np.empty((self.M, self.K)), np.empty((self.N, self.L))]
     for i in range(2):
         if isinstance(params, dict):
             # One shared parameter dict for both models.
             self.ss_models[i] = learner_class(learner, params)
         else:
             # Per-model parameter dicts.
             self.ss_models[i] = learner_class(learner, params[i])
Example #2
0
def train_process(iterable_data):
    """Fit one co-cluster (r, c) model on the observations of that block.

    Parameters
    ----------
    iterable_data : tuple
        Packed worker arguments: ``(rc_list, R, C, I, J, rowAttr, colAttr,
        Z, learner_params, learner, train_loss_type, num_observations,
        M, N)``.  ``rc_list`` holds the (row-cluster, col-cluster) pair;
        ``R``/``C`` map rows/cols to cluster labels; ``I``/``J``/``Z`` are
        the observation coordinates and targets.

    Returns
    -------
    tuple
        ``(total_error, r, c, [coef_, intercept_])`` of the fitted model.
        ``total_error`` is 0.0 when the block has no observations.
    """
    (rc_list, R, C, I, J, rowAttr, colAttr, Z, learner_params, learner,
     train_loss_type, num_observations, M, N) = iterable_data

    # Initialize the model.
    # NOTE(review): the original comment said Logistic errored here in an
    # older sklearn because coef_ could not be written to.
    model = learner_class(learner, learner_params)
    print('learning model')

    # Keep only observations whose row belongs to row-cluster r.
    r = rc_list[0]
    c = rc_list[1]
    row_index = R[I] == r
    filtered_I = I[row_index]
    filtered_J = J[row_index]
    filtered_Z = Z[row_index]
    # Keep only observations whose column belongs to column-cluster c.
    col_index = C[filtered_J] == c
    filtered_I = filtered_I[col_index]
    filtered_Z = filtered_Z[col_index]
    filtered_J = filtered_J[col_index]

    # BUG FIX: total_error was previously assigned only inside the
    # ``if len(filtered_Z) > 0`` branch, so an empty (r, c) block raised
    # UnboundLocalError at the return statement.
    total_error = 0.0
    if len(filtered_Z) > 0:
        # Build the covariate matrix for this block: row attributes
        # concatenated with column attributes per observation.
        # (A dead ``flat_index`` sparse-matrix computation that was never
        # used afterwards has been removed.)
        covariates = np.hstack((rowAttr[filtered_I], colAttr[filtered_J]))
        # Fit the learner on this block's observations.
        model.fit(covariates, filtered_Z)
        if train_loss_type == 'negloglik':
            total_error = train_loss_dict[train_loss_type](
                filtered_Z, model.model.predict_proba(covariates)).sum()
        else:
            total_error = train_loss_dict[train_loss_type](
                filtered_Z, model.model.predict(covariates)).sum()

    return total_error, r, c, [model.model.coef_, model.model.intercept_]
Example #3
0
 def init_learner(self, learner=None, params=None, train_loss=None, test_loss=None):
     """Set up the K x L grid of output learning models and loss functions.

     Parameters
     ----------
     learner : str or None
         Learner identifier forwarded to ``learner_class``.  When None,
         defaults to ``"ridge"`` if covariates exist (``self.D > 0``),
         otherwise ``"mean"``.
     params : dict, optional
         Hyper-parameters shared by all models; defaults to
         ``{'alpha': 0.1}``.
     train_loss : str, optional
         Key into ``train_loss_dict``; defaults to ``"sq_err"``.
     test_loss : str, optional
         Key into ``test_loss_dict``; defaults to ``"mse"``.

     Side effects: sets ``self.models``, the loss attributes, and caches
     ``self.learner`` / ``self.learner_params``.
     """
     # Fix for the mutable-default-argument pitfall: the old default
     # ``params={'alpha':0.1}`` was a single dict shared across calls.
     if params is None:
         params = {'alpha': 0.1}

     ###############################################################
     # Setup Output learning models
     ###############################################################
     if learner is None:  # uses scikits.learn internal bias
         if self.D > 0:
             learner = "ridge"
         else:
             learner = "mean"
     self.models = np.empty((self.K, self.L), dtype='object')
     for r in range(self.K):
         for c in range(self.L):
             self.models[r, c] = learner_class(learner, params)

     if train_loss is None:
         train_loss = "sq_err"
     if test_loss is None:
         test_loss = "mse"
     self.train_loss_type = train_loss
     self.train_loss = train_loss_dict[train_loss]
     self.test_loss = test_loss_dict[test_loss]
     self.learner = learner
     self.learner_params = params