Example #1
def step3():
    x_train, y_train, nbData, nbCategories, nbDescriptors = getData('data/species2_train.csv')

    model = LogisticModel_Gradient(nbDescriptors, nbCategories)
    model.verbose = True

    model.fit(x_train, y_train)

    """décrivez ce qu'on affiche quand on met False ci-dessous"""
    categoriesVersusProba=True
    if categoriesVersusProba:
        fonc=lambda x: model.predict(x)
    else :
        fonc=lambda x: model.predict_proba(x)[:,0]

    viewer = ScatterAndFunctionViewer(
        x_scatter=x_train,
        u_scatter=y_train,
        function=fonc
    )

    viewer.scatter_edgeColor = "black"
    viewer.plot()
    model.close()
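With categoriesVersusProba = False, the viewer colors the plane by the probability of category 0 instead of by the predicted class. A minimal sketch of how the two outputs relate, assuming predict_proba returns one row of class probabilities per sample (the values below are hypothetical):

import numpy as np

proba = np.array([[0.9, 0.1],
                  [0.3, 0.7]])          # hypothetical predict_proba output
categories = np.argmax(proba, axis=1)   # what predict would return: [0, 1]
print(categories)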
Example #2
def step1():

    z = np.loadtxt("data/affineData2d.csv", delimiter=",")
    x = z[:, 0:2].astype(np.float32)
    y = z[:, 2].astype(np.float32)

    viewer = ScatterAndFunctionViewer(x, y)
    viewer.addLabelsOnScatter=True
    viewer.plot()
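The slicing above implies a three-column layout: columns 0 and 1 are the 2D input, column 2 is the target. A sketch that generates a file with that same assumed layout, handy for testing without the course data (synthetic values, not the real CSV):

import numpy as np

n = 50
x = np.random.random([n, 2]).astype(np.float32)
y = x @ np.array([1.5, -0.5], dtype=np.float32) + 2.0   # an arbitrary affine map
np.savetxt("data/affineData2d_fake.csv", np.column_stack([x, y]), delimiter=",")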
Example #3
def step4():

    x_train, y_train, nbData, nbCategories, nbDescriptors = getData(
        'Abis_layers_visu/species2_diago_train.csv')

    def standardize(x: np.ndarray):
        si = np.std(x)
        return (x - np.mean(x)) / si

    def expandX_withProd(x):
        x0 = standardize(x[:, 0])
        x1 = standardize(x[:, 1])
        x_prod = standardize(x0 * x1)

        return np.stack([x0, x1, x_prod], axis=1)

    def expandX_withNorm(x):
        x0 = standardize(x[:, 0])
        x1 = standardize(x[:, 1])
        x_norm = standardize(x0**2 + x1**2)
        return np.stack([x0, x1, x_norm], axis=1)

    withProd = False
    if withProd: expandX = expandX_withProd
    else: expandX = expandX_withNorm

    nbCategories = 2
    nbDescriptors = 3  # because we add one extra descriptor

    model = LogisticModel_Gradient(nbDescriptors, nbCategories)
    model.verbose = True

    model.fit(expandX(x_train), y_train)

    categoriesVersusProba = True
    if categoriesVersusProba:
        fonc = lambda x: model.predict(expandX(x))
    else:
        fonc = lambda x: model.predict_proba(expandX(x))[:, 0]

    viewer = ScatterAndFunctionViewer(x_scatter=x_train,
                                      u_scatter=y_train,
                                      function=fonc)

    viewer.scatter_edgeColor = "black"
    viewer.plot()
    model.close()
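The point of expandX_withNorm is that a class arranged in a disk is not linearly separable in (x0, x1), but becomes separable once x0**2 + x1**2 is added as a third coordinate. A tiny sketch of that effect (toy points, not the course data):

import numpy as np

x = np.array([[0.1, 0.1], [2.0, 2.0]], dtype=np.float32)
r2 = x[:, 0]**2 + x[:, 1]**2
print(r2)  # [0.02, 8.0]: a threshold on this new feature separates inside from outside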
Example #4
def step1():

    z = np.loadtxt('data/deuxQuartiers_train.csv', delimiter=',')
    x, y = z[:, 0:2], z[:, 2]

    viewer = ScatterAndFunctionViewer(x, y)
    viewer.xLabel = "surface in m^2"
    viewer.yLabel = "district"
    viewer.title = "price in euros by district"
    viewer.plot()
Example #5
def step2():
    """"""
    """ une variable de dimension 2 """
    x = tf.get_variable("x", initializer=[3., 3.])
    y = 50 * x[0]**2 + 0.1 * x[1]**2
    gradient = tf.gradients(y, x)[0]

    nb = 20
    x_ = np.empty([nb, 2])
    training_step = 0.01
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for k in range(nb):
            x_[k, :] = sess.run(x)
            sess.run(x.assign(x - gradient * training_step))

    def fonc(x):
        return 50 * x[:, 0]**2 + 0.1 * x[:, 1]**2

    y = 200 * np.ones(nb)

    viewer = ScatterAndFunctionViewer(x_, y, fonc)
    viewer.functionInterval_x0 = [-2, 2]
    viewer.functionInterval_x1 = [-2, 2]
    viewer.plot()
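The same descent can be written without TensorFlow, since the gradient of 50*x0**2 + 0.1*x1**2 is simply [100*x0, 0.2*x1]. A sketch that makes the conditioning problem visible:

import numpy as np

x = np.array([3.0, 3.0])
for k in range(20):
    x = x - 0.01 * np.array([100.0 * x[0], 0.2 * x[1]])
print(x)  # x[0] reached 0 after one step, x[1] has barely moved: the loss is ill-conditioned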
Example #6
def step1():
    """"""
    """ une variable de dimension 2 """
    x = tf.get_variable("x", initializer=[0.4, 0.3])
    y = tf.sin(3 * x[0]) * tf.cos(3 * x[1])
    gradient = tf.gradients(y, x)[0]

    nb = 20
    x_ = np.empty([nb, 2])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for k in range(nb):
            x_[k, :] = sess.run(x)
            sess.run(x.assign(x - gradient * 0.05))

    def fonc(x):
        return np.sin(3 * x[:, 0]) * np.cos(3 * x[:, 1])

    y = np.ones(nb)

    viewer = ScatterAndFunctionViewer(x_, y, fonc)
    viewer.functionInterval_x0 = [-2, 2]
    viewer.functionInterval_x1 = [-2, 2]
    viewer.plot()
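Here the surface sin(3*x0)*cos(3*x1) has many local minima, so the end point depends on where the descent starts. A NumPy-only sketch using the analytic gradient [3*cos(3*x0)*cos(3*x1), -3*sin(3*x0)*sin(3*x1)]:

import numpy as np

def grad(x):
    return np.array([3 * np.cos(3 * x[0]) * np.cos(3 * x[1]),
                     -3 * np.sin(3 * x[0]) * np.sin(3 * x[1])])

for x in (np.array([0.4, 0.3]), np.array([-1.0, 1.2])):
    for k in range(200):
        x = x - 0.05 * grad(x)
    print(x)  # different starting points can end in different local minima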
Example #7
def localTest():
    def createData(nbData: int, sigma=0.1) -> Tuple[np.ndarray, np.ndarray]:
        x = np.random.random([nbData, 2]) * 2
        w = np.array([-1., 2])
        """y[i] = sum_j x[i,j] * w[j] + 5 + noise"""
        y = np.sum(x * w, axis=1) + 5 + np.random.normal(
            0, sigma, size=[nbData])
        return x, y

    x_train, y_train = createData(100)

    model = LinearModel_Gradient(2)
    model.verbose = True
    model.ridgePenalisationCoef = 0.1
    model.training_step = 0.1
    print(x_train.shape, y_train.shape)
    model.fit(x_train, y_train)

    def hatFunction(x_test):
        return model.predict(x_test)

    viewer = ScatterAndFunctionViewer(x_train, y_train, function=hatFunction)
    viewer.functionInterval_x0 = [0, 2]
    viewer.functionInterval_x1 = [0, 2]
    #viewer.scatter_edgeColor="black"
    viewer.plot()
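Since the data here are synthetic, the gradient-based fit can be cross-checked against a closed-form ridge solution. A sketch, assuming the penalized loss is mean squared error plus ridgePenalisationCoef * ||W||**2 (the exact convention of LinearModel_Gradient is an assumption, and this sketch also penalizes the bias for brevity):

import numpy as np

def ridge_closed_form(x, y, lam):
    X = np.hstack([x, np.ones((len(x), 1))])    # append a bias column
    n = len(x)
    A = X.T @ X / n + lam * np.eye(X.shape[1])
    return np.linalg.solve(A, X.T @ y / n)      # [w0, w1, b]

print(ridge_closed_form(x_train, y_train, 0.1))  # compare with model.W_hat, model.b_hat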
Example #8
def step5():

    x_train, y_train, nbData, nbCategories, nbDescriptors = getData(
        'Abis_layers_visu/species2_diago_train.csv')

    nbCategories = 2
    nbDescriptors = 2

    model = TwoLayerModel(nbDescriptors, nbCategories)
    model.nbHidden = 7
    model.verbose = True

    categoriesVersusProba = True
    if categoriesVersusProba:
        fonc = lambda x: model.predict(x)
    else:
        fonc = lambda x: model.predict_proba(x)[:, 0]

    plt.ion()

    try:
        while True:
            model.fit(x_train, y_train, 1000)

            viewer = ScatterAndFunctionViewer(x_scatter=x_train,
                                              u_scatter=y_train,
                                              function=fonc)

            viewer.scatter_edgeColor = "black"
            viewer.plot()

            plt.pause(0.1)
            plt.clf()
    except KeyboardInterrupt:
        print("FIN du training")

    model.close()
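During this interactive loop it can help to track a number as well as the picture. A small sketch to print the training accuracy between fit() calls (assumes model.predict returns one category index per row):

acc = np.mean(model.predict(x_train) == y_train)
print("train accuracy:", acc)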
Example #9
def step4():

    z = np.loadtxt('data/deuxQuartiers_train.csv', delimiter=',')
    x_train, y_train = z[:, 0:2], z[:, 2]
    """à quoi sert cette standardisation ?
    Que se passe si on l'enlève ?
    Comment pourrait-on faire cette standardisation automatiquement (=sans regarder les données)
    Quand les variables ont des ordres de grandeurs très différent, cela marche moins bien. Relier ce phénomène à
    un phénomène observé dans le TP sur l'optimisation.
    """
    x_train, y_train = standardizeXY(x_train, y_train)

    model = LinearModel_Gradient(2)
    model.training_step = 0.1
    model.verbose = True
    model.fit(x_train, y_train)
    """deux façons  d'afficher les résultats"""
    plot_levelColor = True

    if plot_levelColor:

        def function_hat(x):
            return model.predict(x)

        viewer = ScatterAndFunctionViewer(x_train, y_train, function_hat)
        viewer.xLabel = "surface (/100 m^2)"
        viewer.yLabel = "district"
        viewer.title = "price (/10000 euros)"
        viewer.plot()

    else:
        """"""
        """regardons si l'on  prédis correctement les données tests,
        quartier par quartier"""
        z = np.loadtxt('data/deuxQuartiers_test.csv', delimiter=',')
        x_test, y_test = z[:, 0:2], z[:, 2]

        x_test, y_test = standardizeXY(x_test, y_test)
        y_test_hat = model.predict(x_test)

        print("biais estimé:", model.b_hat)
        print("coef estimé:", model.W_hat)

        plotOneQuartier(0, x_test, y_test, y_test_hat, '+')
        plotOneQuartier(1, x_test, y_test, y_test_hat, '.')

    plt.show()
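One plausible answer to the question asked above (a sketch, not necessarily the course's standardizeXY): estimate the mean and standard deviation from the data itself, so no manual inspection is needed:

import numpy as np

def standardizeXY_auto(x, y):
    x = (x - x.mean(axis=0)) / x.std(axis=0)   # per-column standardization
    y = (y - y.mean()) / y.std()
    return x, y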
Example #10
def step3():

    z = np.loadtxt("data/affineData2d.csv", delimiter=",")
    x_train = z[:, 0:2].astype(np.float32)
    y_train = z[:, 2].astype(np.float32)

    model = LinearModel_Gradient(2)
    model.fit(x_train, y_train)

    print("weights estimated:", model.W_hat)
    print("bias estimated:", model.b_hat)

    def hatFunction(x_test):
        return np.sum(model.W_hat * x_test, axis=1) + model.b_hat

    viewer = ScatterAndFunctionViewer(x_train, y_train, function=hatFunction)
    viewer.functionInterval_x0 = [0, 2]
    viewer.functionInterval_x1 = [0, 2]
    viewer.plot()
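hatFunction recomputes the affine map by hand from W_hat and b_hat. A quick consistency-check sketch (assumes model.predict applies that same affine map, which the snippet above does not guarantee):

diff = np.max(np.abs(model.predict(x_train) - hatFunction(x_train)))
print("max |predict - manual|:", diff)  # should be close to 0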
Example #11
def step2():

    z = np.loadtxt("data/affineData2d.csv", delimiter=",")
    x_train = z[:, 0:2].astype(np.float32)
    y_train = z[:, 2].astype(np.float32)

    W = tf.get_variable("W", initializer=tf.truncated_normal(shape=[2], stddev=0.1))
    b = tf.get_variable("b", initializer=1.)

    x_train_tf = tf.constant(x_train, dtype=tf.float32)
    y_train_tf = tf.constant(y_train, dtype=tf.float32)

    """y[i] = sum_j W[j] * x_train[i,j] + b"""
    y = tf.reduce_sum(W * x_train_tf, axis=1) + b
    loss = tf.reduce_mean(tf.square(y - y_train_tf))
    minimize = tf.train.GradientDescentOptimizer(0.01).minimize(loss)

    W_, b_, loss_ = None, None, None
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for step in range(200):
            sess.run(minimize)
            W_, b_, loss_ = sess.run([W, b, loss])
            print("W:", W_)
            print("b:", b_)
            print("loss:", loss_)

    print("weights estimated:",W_)
    print("biais estimated:",b_)

    def hatFunction(x_test):
        return np.sum(W_ * x_test,axis=1) + b_

    viewer = ScatterAndFunctionViewer(x_train, y_train, function=hatFunction)
    viewer.functionInterval_x0 = [0, 2]
    viewer.functionInterval_x1 = [0, 2]
    viewer.plot()
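The gradient loop above should converge toward the ordinary least-squares solution, which can be computed in closed form with NumPy as a cross-check (a sketch; 200 steps at rate 0.01 may not have fully converged):

X = np.hstack([x_train, np.ones((len(x_train), 1))])  # append a bias column
coef, *_ = np.linalg.lstsq(X, y_train, rcond=None)
print("closed-form [W0, W1, b]:", coef)               # compare with W_, b_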