Example #1
 #C_values=np.power(10,np.linspace(-2,2,2),dtype=float)
 #G_values=np.power(10,np.linspace(-1,1,2),dtype=float)
 #Hyper_values=np.power(10,np.linspace(-1,1,2),dtype=float)
 nbclasses = 10
 kt = 5
 kf = 5
 tol = np.power(10, -6, dtype=float)
 InitStrat = "Multiplicative"  #The other possibility is "Tucker2HOSVD", but with that initialization we are in the undercomplete case
 maximum_iterations = 20
 C_values = np.power(10, np.linspace(-5, 4, 1), dtype=float)
 #np.power(10,np.linspace(-5,5,10),dtype=float)
 G_values = np.power(10, np.linspace(-4, 3, 1), dtype=float)
 #np.power(10,np.linspace(-4,4,8),dtype=float)
 Hyper_values = np.power(10, np.linspace(-10, -1, 1), dtype=float)
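 #Note: each linspace above requests a single point (third argument = 1), so the
 #grid search degenerates to one candidate per hyperparameter; widen the grids
 #(see the commented values above) for an actual search.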
 C_svm, G_svm, hyperparam, matrixscore = Methods.Cross_val(
     Tensor_train, y_train, C_values, G_values, Hyper_values, nbclasses, kf,
     kt, maximum_iterations, tol, InitStrat)
 print(C_svm, G_svm)
 #We perform the decomposition of the training tensor.
 #The decomposition yields:
 #  the error associated with each update (error_list);
 #  the temporal and spectral dictionary components A_f and A_t;
 #  the number of iterations and the activation coefficients G.
 #For dimensionality reasons, we can reduce the size of the tensor to be decomposed. This can be done as follows:
 #  Tensor_train=dtensor(Preprocessing.rescaling(Tensor_train,If,It)), where If and It are the desired sizes.
 G, A_f, A_t, error_list, nb_iter = Methods.PenalizedTucker(
     Tensor_train, y_train, nbclasses, kf, kt, hyperparam,
     maximum_iterations, tol, InitStrat)
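 #A quick look at the returned diagnostics (a sketch, assuming error_list is a
 #sequence of per-update reconstruction errors):
 print("iterations:", nb_iter, "final error:", error_list[-1])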
 #
 #We define the training features. They are obtained by vectorizing the matrices G[k,:,:]
 #Training_features=Methods.Training_features_extraction(G)
Example #2
    #Hyper_values=np.power(10,np.linspace(-1,1,2),dtype=float)
    nbclasses = 15
    kt = 3
    kf = 3
    hyperparam = np.power(10, -3, dtype=float)
    tol = np.power(10, -6, dtype=float)
    InitStrat = "Multiplicative"  #The other possibility is "Tucker2HOSVD", but with that initialization we are in the undercomplete case
    maximum_iterations = 10
    C_values = np.power(10, np.linspace(1, 10, 5), dtype=float)
    G_values = np.power(10, np.linspace(-2, 3, 3), dtype=float)
    Hyper_values = np.power(10, np.linspace(-8, -1, 4), dtype=float)
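    #This grid spans 5*3*4 = 60 hyperparameter triples, evaluated in parallel below.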
    parallel_decision = True
    pool = Pool(5)
    #C_svm,G_svm,hyperparam,matrixscore=Methods.Cross_val(Tensor_val,y_val,C_values,G_values,Hyper_values,nbclasses,kf,kt,maximum_iterations,tol,InitStrat)
    C_svm, G_svm, hyperparam, matrixscore = Methods.Cross_val(
        Tensor_val, y_val, C_values, G_values, Hyper_values, nbclasses, kf, kt,
        maximum_iterations, tol, InitStrat, parallel_decision, pool)

    #We perform the decomposition of the training tensor.
    #The decomposition yields:
    #  the error associated with each update (returned below as error);
    #  the temporal and spectral dictionary components A_f and A_t;
    #  the number of iterations and the activation coefficients G.
    #For dimensionality reasons, we can reduce the size of the tensor to be decomposed. This can be done as follows:
    #  Tensor_train=dtensor(Preprocessing.rescaling(Tensor_train,If,It)), where If and It are the desired sizes.

    G, A_f, A_t, error, nbiter = Methods.PenalizedTucker(
        Tensor_train, y_train, nbclasses, kf, kt, hyperparam,
        maximum_iterations, tol, InitStrat, parallel_decision, pool)
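    #Release the worker pool once the parallel runs finish (a sketch of standard
    #multiprocessing cleanup; it assumes Methods does not close the pool internally):
    pool.close()
    pool.join()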

    print("Spot II")
Example #3
ratio_test=0.20 #The test examples represent 20% of the total (500)
ratio_valid=0.10 #The validation examples represent 10% of the total (500)
seed1=5
seed2=5
Tensor_train,y_train,Tensor_test,y_test,Tensor_valid,y_valid=Methods.train_test_validation_tensors(TensorTFR,labels,ratio_test,ratio_valid,seed1,seed2)
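#With 500 examples, these ratios yield 100 test, 50 validation, and 350 training examples.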
#We perform the cross-validation to determine the best values for the hyperparameters.
#Choosing kf and kt amounts to choosing Jf and Jt because, in our framework, Jf=nbclasses*kf; the same holds for Jt (worked numbers below).
C_values=np.power(10,np.linspace(-2,2,5),dtype=float)
G_values=np.power(10,np.linspace(-1,1,3),dtype=float)
Hyper_values=np.power(10,np.linspace(-1,1,3),dtype=float)
nbclasses=10
kt=2
kf=2
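#With these settings, Jf=nbclasses*kf=10*2=20, and likewise Jt=nbclasses*kt=20.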
tol=np.power(10,-3,dtype=float)
maximum_iterations=10
C_svm,G_svm,hyperparam,matrixscore=Methods.Cross_val(Tensor_valid,y_valid,C_values,G_values,Hyper_values,nbclasses,kf,kt,maximum_iterations,tol)
#We perform the decomposition of the training tensor.
#The decomposition yields:
#  the error associated with each update (error_list);
#  the temporal and spectral dictionary components A_f and A_t;
#  the number of iterations and the activation coefficients G.
#For dimensionality reasons, we can reduce the size of the tensor to be decomposed. This can be done as follows:
#  Tensor_train=dtensor(Preprocessing.rescaling(Tensor_train,If,It)), where If and It are the desired sizes.
G,A_f,A_t,error_list,nb_iter=Methods.PenalizedTucker(Tensor_train,y_train,nbclasses,kf,kt,hyperparam,maximum_iterations,tol)
#We define the training features. They are obtained by vectorizing the matrices G[k,:,:]
Training_features=Methods.Training_features_extraction(G)
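#A minimal sketch of what that vectorization amounts to, assuming G behaves as a
#numpy array whose first axis indexes the matrices G[k,:,:] (the project helper
#above is authoritative; this line is illustrative only):
#Training_features=np.asarray(G).reshape(len(G),-1)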
#We define the test features.
#If we resize the training tensor, we must also resize the test tensor so that the dimensions stay coherent.
#This is done via the rescaling function defined in Preprocessing.
Test_features=Methods.Test_features_extraction(Tensor_test,A_f,A_t)
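#A minimal sketch of the downstream classification step, assuming C_svm and G_svm
#are the usual SVM cost and RBF-kernel width returned by Cross_val (hypothetical
#usage, not part of the original example):
#from sklearn.svm import SVC
#clf=SVC(C=C_svm,gamma=G_svm)
#clf.fit(Training_features,y_train)
#print("Test accuracy:",clf.score(Test_features,y_test))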