# Assumed imports; Visualg (convergence plots) is defined elsewhere in the module.
from time import process_time

from numpy import dot
from numpy.linalg import inv, norm


def Newton_F(Oracle, x0):

    ##### Variable initialization

    iter_max = 100
    gradient_step = 1
    threshold = 0.000001

    gradient_norm_list = []
    gradient_step_list = []
    critere_list = []

    time_start = process_time()

    x = x0

    ##### Iteration loop

    for k in range(iter_max):

        # Criterion, gradient and Hessian values
        critere, gradient, hessien = Oracle(x, 7)

        # Convergence test
        gradient_norm = norm(gradient)
        if gradient_norm <= threshold:
            break

        # Descent direction (Newton step)
        D = -dot(inv(hessien), gradient)
        print(gradient_norm)  # trace the gradient norm at each iteration

        # Variable update
        x = x + (gradient_step * D)

        # Track the gradient norm, step size and criterion
        gradient_norm_list.append(gradient_norm)
        gradient_step_list.append(gradient_step)
        critere_list.append(critere)

    ##### Optimization results

    critere_opt = critere
    gradient_opt = gradient
    x_opt = x
    time_cpu = process_time() - time_start

    print()
    print('Iterations:', k)
    print('CPU time:', time_cpu)
    print('Optimal criterion:', critere_opt)
    print('Gradient norm:', norm(gradient_opt))

    # Convergence visualization
    Visualg(gradient_norm_list, gradient_step_list, critere_list)

    return critere_opt, gradient_opt, x_opt
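
# Minimal usage sketch (not part of the original source). It assumes the oracle
# contract used above, i.e. Oracle(x, 7) returns (criterion, gradient, Hessian),
# here for the strictly convex quadratic f(x) = 1/2 x'Ax - b'x, on which the
# fixed-step Newton iteration converges in a single step. Newton_F itself also
# relies on norm, inv, dot, process_time and Visualg from the enclosing module.
import numpy as np

def quadratic_oracle(x, mode):
    A = np.array([[3.0, 1.0], [1.0, 2.0]])
    b = np.array([1.0, -1.0])
    critere = 0.5 * x @ A @ x - b @ x
    gradient = A @ x - b
    if mode == 7:
        return critere, gradient, A   # criterion, gradient, Hessian
    return critere, gradient          # criterion, gradient only

# critere_opt, gradient_opt, x_opt = Newton_F(quadratic_oracle, np.zeros(2))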
Example #2
def get_xy(file_conllu, file_features, file_embedding=None):
    mcd = get_mcd()

    print("Chargement des arbres")
    obj_generateAlltree = ConstructAllTree(file_conllu, mcd, True)
    # print(obj_generateAlltree.get_corpus())
    # print(obj_generateAlltree.get_vocabulary())

    all_tree = obj_generateAlltree.get_allTreeProjectiviser()
    # print(all_tree[0].print_tree())
    print("Arbres charger : ", len(all_tree))

    print("Création du dataset")
    features = Features(file_features)
    for tree in all_tree:
        # Run the oracle on each projectivized tree to collect training examples.
        A = Oracle(tree, features)
        A.run()
    print("Convertion du dataset")
    print("file_embedding : ", file_embedding)
    X, Y = features.get_Data_Set(file_embedding)

    labels_encoderX = features.get_label_encoderX()
    labels_encoderY = features.get_label_encoderY()

    print("X_train_shape", X.shape)
    print("Y_train_shape", Y.shape)

    return X, Y, labels_encoderX, labels_encoderY, all_tree
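
# Hypothetical call (file names are placeholders, not taken from the source);
# get_xy also relies on get_mcd, ConstructAllTree, Features and Oracle from the
# enclosing project.
# X, Y, enc_X, enc_Y, trees = get_xy("train.conllu", "features.txt",
#                                    file_embedding="embeddings.vec")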
Example #3
def Newton_V(Oracle, x0):
    ##### Variable initialization

    iter_max = 100
    # gradient_step_ini = 1. # Primal problem.
    gradient_step_ini = 1000  # Dual problem.
    threshold = 0.000001

    error_count = 0  # Counter for non-convergence of the Fletcher-Lemarechal line search.

    gradient_norm_list = []
    gradient_step_list = []
    critere_list = []

    time_start = process_time()

    x = x0

    ##### Iteration loop
    for k in range(iter_max):
        # Criterion, gradient and Hessian values
        critere, gradient, hessien = Oracle(x, 7)

        # Convergence test
        gradient_norm = norm(gradient)
        if gradient_norm <= threshold:
            break

        # Descent direction (Newton step)
        direction = -dot(inv(hessien), gradient)

        # Step size (Wolfe line search)
        gradient_step, error_code = Wolfe(gradient_step_ini, x, direction,
                                          Oracle)

        if error_code != 1:
            error_count += 1

        # Variable update
        x = x + (gradient_step * direction)

        # Track the gradient norm, step size and criterion
        gradient_norm_list.append(gradient_norm)
        gradient_step_list.append(gradient_step)
        critere_list.append(critere)

    if error_count > 0:
        print()
        print("Non-convergence de l'algorithme de Fletcher-Lemarechal : {}".
              format(error_count))

    ##### Optimization results
    critere_opt = critere
    gradient_opt = gradient
    x_opt = x
    time_cpu = process_time() - time_start

    print()
    print('Iterations:', k)
    print('CPU time:', time_cpu)
    print('Optimal criterion:', critere_opt)
    print('Gradient norm:', norm(gradient_opt))

    # Convergence visualization
    Visualg(gradient_norm_list, gradient_step_list, critere_list)

    return critere_opt, gradient_opt, x_opt
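
# Wolfe(step_ini, x, direction, Oracle) above is an external Fletcher-Lemarechal
# line search. Below is a minimal sketch of the two Wolfe conditions such a
# routine is expected to enforce, assuming (as in the BFGS example further down)
# that Oracle(x, 4) returns (criterion, gradient); the constants c1 and c2 are
# conventional defaults, not taken from the source.
import numpy as np

def wolfe_conditions_hold(step, x, direction, Oracle, c1=1e-4, c2=0.9):
    f0, g0 = Oracle(x, 4)
    f1, g1 = Oracle(x + step * direction, 4)
    slope0 = np.vdot(g0, direction)
    armijo = f1 <= f0 + c1 * step * slope0              # sufficient decrease
    curvature = np.vdot(g1, direction) >= c2 * slope0   # curvature condition
    return armijo and curvature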
Example #4
    data_filename=data_dir,
    batch_size=batch_size,
    sequence_length=sequence_length,
    validation_split=validation_split,
    fake_batch_size=discriminator_pre_training_fake_batch_size,
    seed=seed,
    data_type="val",
    use_word_vectors=use_word_vectors)

# Initialize Models
oracle = Oracle(train_data_loader=train_dl,
                validation_data_loader=val_dl,
                units=oracle_hidden_units,
                leaky_relu_alpha=oracle_leaky_relu_alpha,
                num_layers=oracle_layers,
                opt=oracle_optimizer,
                dropout_keep_prob=oracle_dropout_keep_prob,
                l2_reg_lambda=oracle_l2_regularization_lambda,
                sequence_length=sequence_length,
                loss=oracle_loss,
                metrics=oracle_metrics)
gen = Generator(train_data_loader=train_dl,
                validation_data_loader=val_dl,
                units=generator_hidden_units,
                leaky_relu_alpha=generator_leaky_relu_alpha,
                num_layers=generator_layers,
                opt=generator_optimizer,
                dropout_keep_prob=generator_dropout_keep_prob,
                l2_reg_lambda=generator_l2_regularization_lambda,
                sequence_length=sequence_length,
                loss=generator_loss,
Example #5
def BFGS(Oracle, x0):
    ##### Variable initialization

    iter_max = 10000
    # gradient_step_ini = 1.  # Primal problem.
    gradient_step_ini = 1000.  # Dual problem.
    threshold = 0.000001

    error_count = 0  # Counter for non-convergence of the Fletcher-Lemarechal line search.

    gradient_norm_list = []
    gradient_step_list = []
    critere_list = []

    time_start = process_time()

    x = x0

    ##### Iteration loop
    for k in range(iter_max):
        # New criterion and gradient values
        critere, gradient = Oracle(x, 4)

        # Convergence test
        gradient_norm = norm(gradient)
        if gradient_norm <= threshold:
            break

        # Descent direction (BFGS update of the inverse-Hessian approximation W)
        if k == 0:
            W = np.eye(len(gradient))
        else:
            delta_x = x - x_p
            delta_g = gradient - gradient_p
            delta_mat_1 = np.outer(delta_x, delta_g) / np.vdot(
                delta_g, delta_x)
            delta_mat_2 = np.outer(delta_x, delta_x) / np.vdot(
                delta_g, delta_x)
            I = np.eye(len(gradient))  # Identity matrix
            W = np.dot(np.dot(I - delta_mat_1, W_p),
                       I - np.transpose(delta_mat_1)) + delta_mat_2
        direction = np.dot(-W, gradient)

        # Step size (Wolfe line search)
        gradient_step, error_code = Wolfe(gradient_step_ini, x, direction,
                                          Oracle)

        if error_code != 1:
            error_count += 1

        # Variable update
        x_p = x  # Previous position
        gradient_p = gradient  # Previous gradient
        W_p = W  # Previous inverse-Hessian approximation
        x = x + (gradient_step * direction)

        # Track the gradient norm, step size and criterion
        gradient_norm_list.append(gradient_norm)
        gradient_step_list.append(gradient_step)
        critere_list.append(critere)

    if error_count > 0:
        print()
        print("Non-convergence de l'algorithme de Fletcher-Lemarechal : {}".
              format(error_count))

    ##### Optimization results
    critere_opt = critere
    gradient_opt = gradient
    x_opt = x
    time_cpu = process_time() - time_start

    print()
    print('Iterations:', k)
    print('CPU time:', time_cpu)
    print('Optimal criterion:', critere_opt)
    print('Gradient norm:', norm(gradient_opt))

    # Convergence visualization
    Visualg(gradient_norm_list, gradient_step_list, critere_list)

    return critere_opt, gradient_opt, x_opt
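
# Illustrative check of the inverse-Hessian update used above (not part of the
# source): after a BFGS update, W must satisfy the secant equation
# W @ delta_g == delta_x.
import numpy as np

def bfgs_update(W_p, delta_x, delta_g):
    rho = 1.0 / np.vdot(delta_g, delta_x)
    I = np.eye(len(delta_x))
    V = I - rho * np.outer(delta_x, delta_g)
    return V @ W_p @ V.T + rho * np.outer(delta_x, delta_x)

# W = bfgs_update(np.eye(2), np.array([1.0, 0.5]), np.array([2.0, 1.5]))
# np.allclose(W @ np.array([2.0, 1.5]), np.array([1.0, 0.5]))  # -> True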