Example #1
def plot_error(datax, datay, f, step=10):
    # Evaluate the loss f(datax, datay, w) at every grid point w and draw
    # the error surface over 2-D weight space as 25 filled contour levels.
    grid, x1list, x2list = make_grid(xmin=-4, xmax=4, ymin=-4, ymax=4,
                                     step=step)
    plt.contourf(
        x1list, x2list,
        np.array([f(datax, datay, w) for w in grid]).reshape(x1list.shape), 25)
    plt.colorbar()
    plt.show()
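Every example on this page relies on a make_grid helper that is not shown. Below is a minimal sketch of what it could look like, inferred from the call sites on this page; the signature, defaults, and data-overrides-bounds behavior are assumptions, not the original implementation.

import numpy as np
import matplotlib.pyplot as plt

def make_grid(data=None, xmin=-5, xmax=5, ymin=-5, ymax=5, step=20):
    # Hypothetical reconstruction: build a step-by-step grid of 2-D points.
    # If data is given, take the bounds from its first two columns.
    if data is not None:
        xmin, xmax = data[:, 0].min(), data[:, 0].max()
        ymin, ymax = data[:, 1].min(), data[:, 1].max()
    x1list, x2list = np.meshgrid(np.linspace(xmin, xmax, step),
                                 np.linspace(ymin, ymax, step))
    # Flatten to a (step*step, 2) array so callers can iterate over points
    # or reshape function values back to x1list.shape for plt.contourf.
    grid = np.c_[x1list.ravel(), x2list.ravel()]
    return grid, x1list, x2list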
Example #2
def gaussian_transformation(X, sigma):
    # Re-represent each sample by its RBF similarity to every grid point:
    # new_X[i, j] is the Gaussian kernel between X[i] and grid center j.
    # Requires: from math import isqrt (Python 3.8+).
    grid, _, _ = make_grid(X, step=isqrt(X.shape[1]))
    new_X = np.zeros((X.shape[0], grid.shape[0]))
    for i in range(X.shape[0]):
        for j in range(grid.shape[0]):
            new_X[i, j] = gaussian(X[i], grid[j], sigma)
    return new_X
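The gaussian helper used here is not shown either; a plausible definition (an assumption, not taken from the source) is the standard RBF similarity between a sample and a grid center:

import numpy as np

def gaussian(x, center, sigma):
    # Hypothetical helper: RBF similarity exp(-||x - c||^2 / (2 * sigma^2)).
    return np.exp(-np.linalg.norm(x - center) ** 2 / (2 * sigma ** 2))

With this definition the double loop above collapses to one broadcasted expression: np.exp(-((X[:, None, :] - grid[None, :, :]) ** 2).sum(-1) / (2 * sigma ** 2)).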
Example #3
def plot_frontiere_proba(data, f, step=20):
    """Plot the decision boundary of f as a probability map.

    :param data: data points whose region the boundary is drawn over
    :param f: prediction function, returning one probability per grid point
    :param step: grid resolution (points per axis)
    :returns: None (draws on the current matplotlib figure)

    """

    grid, x, y = make_grid(data=data, step=step)
    plt.contourf(x, y, f(grid).reshape(x.shape), 255)
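Here f is expected to map the (step*step, 2) grid to one probability per point. A hypothetical usage with a scikit-learn classifier; clf and the LogisticRegression choice are illustrative, while gen_arti is the data generator from Example #8:

from sklearn.linear_model import LogisticRegression

trainx, trainy = gen_arti(nbex=1000, data_type=1, epsilon=0.3)
clf = LogisticRegression().fit(trainx, trainy.ravel())
plot_frontiere_proba(trainx, lambda g: clf.predict_proba(g)[:, 1], step=50)
plt.show()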
Example #4
def plot_trajectory(datax, datay, perceptron, step=10):
    # Fit the perceptron, then draw its loss surface over 2-D weight space
    # and overlay the trajectory followed by the weights during training.
    plt.figure()
    w_histo, f_histo, grad_histo = perceptron.fit(datax, datay)
    xmax, xmin = np.max(w_histo[:, 0]), np.min(w_histo[:, 0])
    ymax, ymin = np.max(w_histo[:, 1]), np.min(w_histo[:, 1])
    # Margin around the trajectory so it does not touch the plot border.
    dev_x, dev_y = abs(xmax - xmin) / 4, abs(ymax - ymin) / 4
    dev_x += int(dev_x == 0) * 5  # avoid a zero-width margin
    dev_y += int(dev_y == 0) * 5
    grid, x1list, x2list = make_grid(xmin=xmin - dev_x, xmax=xmax + dev_x,
                                     ymin=ymin - dev_y, ymax=ymax + dev_y,
                                     step=step)
    plt.contourf(x1list, x2list,
                 np.array([perceptron.loss(datax, datay, w)
                           for w in grid]).reshape(x1list.shape), 25)
    plt.colorbar()
    plt.scatter(w_histo[:, 0], w_histo[:, 1], marker='+', color='black')
    plt.show()
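This assumes perceptron.fit returns the weight, loss, and gradient histories as a 3-tuple, and that the weights are 2-D so the trajectory can be drawn on the loss surface. Hypothetical usage, reusing the Perceptron constructor and gen_arti call from Example #8:

trainx, trainy = gen_arti(nbex=1000, data_type=1, epsilon=0.3)
perceptron = Perceptron(loss=hinge, loss_g=hinge_g, max_iter=100, eps=0.1,
                        kernel=None)
plot_trajectory(trainx, trainy, perceptron)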
Example #5
def plot_error(datax, datay, f, step=10):
    """ Plot iso-curves of the error.

    :param datax: the data samples
    :param datay: labels of the samples
    :param f: loss function, evaluated as f(datax, datay, w) on each grid weight w
    :param step: grid resolution (points per axis)

    """

    grid, x1list, x2list = make_grid(data=datax,
                                     xmin=-4,
                                     xmax=4,
                                     ymin=-4,
                                     ymax=4,
                                     step=step)
    plt.contourf(
        x1list, x2list,
        np.array([f(datax, datay, w) for w in grid]).reshape(x1list.shape), 25)
    plt.colorbar()
    plt.show()
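Unlike Example #3, the function f here is a loss evaluated as f(datax, datay, w), so the contours show the error surface over 2-D weight space rather than a decision boundary. Hypothetical usage with the hinge loss sketched at the end of this page:

trainx, trainy = gen_arti(nbex=1000, data_type=1, epsilon=0.3)
plot_error(trainx, trainy, hinge)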
Example #6
def plotAll_in_subplots(testx, testy, f, ax=plt):
    """ Plot the decision boundary and the data on axis ax.

    :param testx: samples of the test set
    :param testy: labels of the test set
    :param f: prediction function
    :param ax: axis on which to draw the plot

    """

    # Plot the decision boundary
    grid, x, y = make_grid(data=testx, step=50)
    ax.contourf(x, y, f(grid).reshape(x.shape), 255)

    # Plot the data, one colour and marker per class label
    cols = ["red", "green", "blue", "orange", "black", "cyan"]
    marks = [".", "+", "*", "o", "x", "^"]
    for i, l in enumerate(sorted(set(testy.flatten()))):
        ax.scatter(testx[testy == l, 0],
                   testx[testy == l, 1],
                   c=cols[i],
                   marker=marks[i])
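The default ax=plt works because pyplot exposes contourf and scatter at module level, so the same function draws either on the global figure or on a subplot axis. Hypothetical usage, where f_linear and f_gaussian stand for any two prediction functions:

fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
plotAll_in_subplots(testx, testy, f_linear, ax=ax1)
plotAll_in_subplots(testx, testy, f_gaussian, ax=ax2)
plt.show()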
Example #7
def plot_frontiere_proba(data, f, step=20):
    # Same as Example #3, except make_grid is reached through the "at"
    # module alias.
    grid, x, y = at.make_grid(data=data, step=step)
    plt.contourf(x, y, f(grid).reshape(x.shape), 255)
Example #8
def main():
    """ Tracer des isocourbes de l'erreur """

    # plt.ion
    trainx, trainy = gen_arti(nbex=1000, data_type=1, epsilon=0.3)
    testx, testy = gen_arti(nbex=1000, data_type=1, epsilon=0.3)
    grid, x1list, x2list = make_grid(xmin=-4, xmax=4, ymin=-4, ymax=4)

    # Plot error for test
    # plot_cost_erreur(testx, testy)

    # Batch gradient descent
    perceptron = Perceptron(loss=hinge,
                            loss_g=hinge_g,
                            max_iter=1000,
                            eps=0.1,
                            kernel=None)
    learn_plot_perceptron2D(perceptron,
                            trainx,
                            trainy,
                            testx,
                            testy,
                            gradient_descent="batch",
                            title="batch gradient descent")

    perceptron_poly = Perceptron(loss=hinge,
                                 loss_g=hinge_g,
                                 max_iter=100,
                                 eps=0.1,
                                 kernel="polynomial")
    learn_plot_perceptron2D(perceptron_poly,
                            trainx,
                            trainy,
                            testx,
                            testy,
                            gradient_descent="batch",
                            title="Batch gradient descent with polynomial "
                            "kernel")

    perceptron_gaussian = Perceptron(loss=hinge,
                                     loss_g=hinge_g,
                                     max_iter=100,
                                     eps=0.1,
                                     kernel="gaussian")
    learn_plot_perceptron2D(perceptron_gaussian,
                            trainx,
                            trainy,
                            testx,
                            testy,
                            gradient_descent="batch",
                            title="Batch gradient descent with gaussian "
                            "kernel")

    # Stochastic gradient descent
    perceptron_stochastic = Perceptron(loss=stochastic,
                                       loss_g=stochastic_g,
                                       max_iter=10,
                                       eps=0.1,
                                       kernel=None)
    learn_plot_perceptron2D(perceptron_stochastic,
                            trainx,
                            trainy,
                            testx,
                            testy,
                            gradient_descent="stochastic",
                            title="Stochastic gradient descent")

    # Mini-Batch gradient descent
    perceptron_minibatch = Perceptron(loss=hinge,
                                      loss_g=hinge_g,
                                      max_iter=100,
                                      eps=0.1,
                                      kernel=None)
    learn_plot_perceptron2D(perceptron_minibatch,
                            trainx,
                            trainy,
                            testx,
                            testy,
                            gradient_descent="minibatch",
                            title="Mini-Batch gradient descent")

    # Stochastic gradient descent Animation
    # perceptron_stoch_anim = Perceptron(loss=stochastic, loss_g=stochastic_g,
    #                                    max_iter=1, eps=0.1, kernel=None)
    # learn_plot_perceptron2D(perceptron_stoch_anim, trainx, trainy, testx,
    #                         testy, gradient_descent="stochastic_animation",
    #                         title="Stochastic gradient descent")

    ##########################################################################
    # -------------------------------- USPS ------------------------------- #
    ##########################################################################

    perceptron_usps = Perceptron(loss=hinge,
                                 loss_g=hinge_g,
                                 max_iter=500,
                                 eps=0.1,
                                 kernel=None)

    # Weight matrices: 6 vs 9 and 1 vs 8
    fig, (ax1, ax2) = plt.subplots(ncols=2, sharex=True, sharey=True)
    plt.suptitle("Weight matrices")
    weight_matrix(6, 9, fig, perceptron_usps, ax1)
    weight_matrix(1, 8, fig, perceptron_usps, ax2)
    # plt.savefig("weight_matrix_qqlexs")

    # Weight matrix: 6 vs All
    matrix_one_vs_all(6, perceptron_usps)

    # Error curves: 6 vs All
    error_curves(6, "sklearn_perceptron")
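The hinge and hinge_g functions passed to Perceptron are not defined on this page. Below is a minimal sketch of a perceptron-style hinge loss and its subgradient, assuming datax is an (n, d) array, datay is a flat (n,) array of labels in {-1, +1}, and w is a (d,) weight vector; these shapes and conventions are assumptions, not the original code.

import numpy as np

def hinge(datax, datay, w):
    # Mean perceptron criterion: max(0, -y * <w, x>) averaged over samples.
    return np.mean(np.maximum(0, -datay * datax.dot(w)))

def hinge_g(datax, datay, w):
    # Subgradient: -y * x on misclassified samples, zero elsewhere.
    mask = (datay * datax.dot(w)) < 0
    return -(datay[:, None] * datax * mask[:, None]).mean(axis=0)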