def compute_score_matrix(n=64, binary_score=[1, 0, -3, 0]):
    # binary_score = [true_pos, false_neg, false_pos, true_neg]
    score_matrix = np.zeros((n, n))
    for c_true in range(n):
        for c_pred in range(n):
            score_matrix[c_true, c_pred] = score(
                class_to_configuration(c_true, verbose=False),
                class_to_configuration(c_pred, verbose=False), binary_score)

    return score_matrix
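compute_score_matrix relies on score and class_to_configuration, which are defined elsewhere in this codebase and are not shown in these snippets. The block below is only a minimal sketch of the assumed contract of score: it supposes class_to_configuration returns a binary (0/1) configuration matrix and that binary_score = [true_pos, false_neg, false_pos, true_neg] weights the four confusion counts between the true and the predicted configuration. The name score_sketch is hypothetical; the repository's actual implementation may differ.

import numpy as np


def score_sketch(configuration_true, configuration_pred,
                 binary_score=[1, 0, -3, 0]):
    # Hypothetical stand-in for score(); NOT the repository's implementation.
    # binary_score = [true_pos, false_neg, false_pos, true_neg]
    true_pos = np.logical_and(configuration_true, configuration_pred).sum()
    false_neg = np.logical_and(configuration_true,
                               np.logical_not(configuration_pred)).sum()
    false_pos = np.logical_and(np.logical_not(configuration_true),
                               configuration_pred).sum()
    true_neg = np.logical_and(np.logical_not(configuration_true),
                              np.logical_not(configuration_pred)).sum()
    counts = np.array([true_pos, false_neg, false_pos, true_neg])
    return float((counts * np.array(binary_score)).sum())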
Example #2
     nTrial, nCh = y_test_level2.shape[:2]
 
     n_iter=50
     fpr = np.zeros(n_iter)
     tpr = np.zeros(n_iter)    
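     # grid over the false-positive penalty (-3 to 0, step 0.3) and the
     # true-negative reward (0 to 1, step 0.2): 10 x 5 = 50 settings = n_iter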
     for i_iter, iter_i in enumerate(itertools.product(np.arange(-3,0,0.3),np.arange(0,1,0.2))):
         print "Building score matrix"
         print iter_i
         binary_score = [1,0,iter_i[0],iter_i[1]]
         score_matrix = compute_score_matrix(n=64, binary_score=binary_score)
         
         print "Compute prediction according to the score matrix"
         y_pred = np.array([best_decision(prob_configuration, score_matrix=score_matrix)[0] for prob_configuration in predicted_probability])
        
         y_pred_conf = []
         y_pred_conf += [class_to_configuration(y_pred[i_trial], verbose=False) for i_trial in range(nTrial)]
         y_pred_conf = np.array(y_pred_conf)
         
         print "Confusion matrices"
         conf_mat = np.zeros([2,2])
         n_conect = np.array(y_test_level2.sum(-1).sum(-1), dtype=np.float)   
         n_noconect = np.repeat(nCh*(nCh-1), nTrial) - n_conect
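         # per-trial number of true connections and of possible non-connections
         # (nCh*(nCh-1) off-diagonal entries of the configuration matrix)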
         true_pos = np.zeros(nTrial)
         false_pos = np.zeros(nTrial)
         false_neg = np.zeros(nTrial)
         true_neg = np.zeros(nTrial)
         
         for i_trial in range(nTrial):
             true_pos[i_trial] = np.logical_and(y_test_level2[i_trial], y_pred_conf[i_trial]).sum() 
             false_pos[i_trial] = np.logical_and( np.logical_xor(y_test_level2[i_trial], y_pred_conf[i_trial]), y_pred_conf[i_trial]).sum() - nCh #to remove the diagonal
             false_neg[i_trial] = np.logical_and( np.logical_xor(y_test_level2[i_trial], y_pred_conf[i_trial]), y_test_level2[i_trial]).sum()
Example #3
    print "X:", X.shape
    print "Computing score."
    nComb = order_combinations.shape[0]
    cv = StratifiedKFold(y, n_folds=n_folds)
    optimal_decision = True  # decision based on a predefined score_matrix
    binary_class = True

    if binary_class:

        # score assigned to false/true positive and false/true negative
        score_matrix = np.array([[1, 0], [0, 1]], dtype=int)  # alternative: np.array([[0, -3], [0, 1]], dtype=int)
        # conversion from int representation of the class to binary matrix representation
        y_new = []
        y_new += [class_to_configuration(y_i, verbose=False)
                  for y_i in np.squeeze(y)]
        y_new = np.array(y_new, dtype=int)
        y_pred = np.zeros(y_new.shape, dtype=int)
        predicted_probability = np.zeros((X.shape[0], 6, 2))
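        # presumably one 2-class probability estimate for each of the
        # 6 = 3 * 2 off-diagonal channel pairs of a 3-channel configuration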

        for i, (train, test) in enumerate(cv):
            print "Fold %d" % i
            index_x = np.append(
                np.triu_indices(3, 1)[0],
                np.tril_indices(3, -1)[0])
            index_y = np.append(
                np.triu_indices(3, 1)[1],
                np.tril_indices(3, -1)[1])
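            # index_x/index_y: row/column indices of every off-diagonal entry
            # of the configuration matrix, i.e. all ordered channel pairs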

            for j in range(len(index_x)):
    # This is the previous code in vectorized format:
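    # expected score of each candidate class:
    # scores[c_pred] = sum over c_true of prob_configuration[c_true] * score_matrix[c_true, c_pred]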
    scores = (score_matrix * prob_configuration[:, None]).sum(0)

    best = scores.argmax()
    return best, scores


if __name__ == '__main__':

    np.random.seed(0)

    from create_trainset import class_to_configuration

    c_true = 1
    c_pred = 2
    configuration_true = class_to_configuration(c_true)
    configuration_pred = class_to_configuration(c_pred)

    print "Score:", score(configuration_true, configuration_pred)

    score_matrix = compute_score_matrix()

    # prob_configuration = np.random.rand(64)
    # prob_configuration /= prob_configuration.sum()
    # prob_configuration = np.random.dirichlet(alpha=np.ones(64))
    prob_configuration = np.random.dirichlet(alpha=np.arange(1, 65) ** 2)  # alpha must be strictly positive

    print "Given", prob_configuration
    best, scores = best_decision(prob_configuration, score_matrix=score_matrix)
    print "The score of each decision is:", scores
    print "The best decision is:", best
Example #5
    print "X train:", X_train.shape
    print "X test", X.shape
    print "Learning and prediction."
    cv_train = StratifiedKFold(y_train, n_folds=n_folds)
    cv_test = StratifiedKFold(y, n_folds=n_folds)
    cv = izip(cv_train, cv_test)
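    # pair each fold of the train-set CV with the corresponding fold of the
    # test-set CV (izip comes from itertools in Python 2)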
    
    binary_class = True
    nComb = order_combinations.shape[0]
    nTrial, nCh = y_level2_conf.shape[:2]

    if binary_class:
            
        score_matrix = np.array([[1,0],[0,1]], dtype=int) #cost assigned to [true_pos, false_neg, false_pos, true_neg]
        y_new = []
        y_new += [(class_to_configuration(y_i, verbose=False)) for y_i in np.squeeze(y_train)] 
        y_new = np.array(y_new, dtype=int)
        y_pred = np.zeros([y.shape[0],nCh,nCh], dtype=int)
        predicted_probability = np.zeros((X.shape[0], 6, 2))
        
        for i, (train, test) in enumerate(cv):
            print "Fold %d" % i
            train = train[0] #train part of the train set
            test = test[1] #test part of the test set
            
            index_x = np.append(np.triu_indices(nCh,1)[0], np.tril_indices(nCh,-1)[0])
            index_y = np.append(np.triu_indices(nCh,1)[1], np.tril_indices(nCh,-1)[1])
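            # index_x/index_y enumerate all off-diagonal channel pairs;
            # one binary classifier is fitted per pair in the loop below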
                        
            for j in range(len(index_x)):
                print "Train."   
                clf.fit(X_train[train], y_new[train,index_x[j],index_y[j]])