Code Example #1
def main_circles():
    # init
    data = CirclesData()
    data.plot_data()
    np.random.seed(42)
    N = data.Xtrain.shape[0]
    inds = np.arange(0, N)
    np.random.shuffle(inds)
    Xtrain = data.Xtrain[inds]
    Ytrain = data.Ytrain[inds]
    Nbatch = 15
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    eta = 0.03

    # First tests, code to be modified
    model, loss, optim = init_model(nx, nh, ny, eta)

    writer = SummaryWriter()
    L, acc = 0, 0

    # TODO: training loop
    Nepochs = 200
    for i in range(Nepochs):

        for j in range(0, N, Nbatch):
            Xbatch = Xtrain[j:j + Nbatch]
            Ybatch = Ytrain[j:j + Nbatch]
            Yhat = model(Xbatch)
            L, acc = loss_accuracy(loss, Yhat, Ybatch)
            # compute the gradients and update the parameters
            optim.zero_grad()
            L.backward()
            optim.step()

        # Loss and Accuracy on Test
        Yhat_test = model(data.Xtest)
        L_test, acc_test = loss_accuracy(loss, Yhat_test, data.Ytest)

        data.plot_loss(L, L_test, acc, acc_test)

    Ygrid = torch.nn.Softmax(dim=1)(model(data.Xgrid))
    data.plot_data_with_grid(Ygrid.detach())

    # wait for a key press to keep the figures open
    input("done")
Code Example #2
File: circlesV4.py  Project: keyber/RDFIA
        torch.nn.Linear(nh, ny),
    )
    loss = torch.nn.CrossEntropyLoss()
    optim = torch.optim.SGD(model.parameters(), lr=eta)
    return model, loss, optim


def loss_accuracy(Yhat, Y, loss):
    L = loss(Yhat, Y)
    acc = torch.mean((Yhat.argmax(1) == Y).float())
    return L, acc


if __name__ == '__main__':

    data = CirclesData()

    data.plot_data()

    # init
    N = data.Xtrain.shape[0]
    Nbatch = 16
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    model, loss, optim = init_model(nx, nh, ny)

    # epoch
    acctests = []
    for iteration in range(100):
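        # A sketch (an assumption, not the file's actual continuation) of the epoch body:
        # mini-batch steps followed by a test-accuracy measurement, mirroring Code Example #1.
        for j in range(0, N, Nbatch):
            Xbatch = data.Xtrain[j:j + Nbatch]
            Ybatch = data.Ytrain[j:j + Nbatch].argmax(1)  # CrossEntropyLoss expects class indices
            L, acc = loss_accuracy(model(Xbatch), Ybatch, loss)
            optim.zero_grad()
            L.backward()
            optim.step()
        _, acc_test = loss_accuracy(model(data.Xtest), data.Ytest.argmax(1), loss)
        acctests.append(float(acc_test))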
Code Example #3

def sgd(model, eta):
    # TODO: update the contents of the parameters

    with torch.no_grad():
        for param in model.parameters():
            param -= eta * param.grad

        model.zero_grad()


if __name__ == '__main__':

    # init
    data = CirclesData()
    data.plot_data()
    np.random.seed(42)

    N = data.Xtrain.shape[0]
    inds = np.arange(0, N)
    np.random.shuffle(inds)
    Xtrain = data.Xtrain[inds]
    Ytrain = data.Ytrain[inds]
    Nbatch = 20
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    eta = 0.03

    # First tests, code to be modified
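    # A minimal sketch (an assumption, not the file's actual continuation) of one training
    # step using the hand-written sgd above; the model architecture and loss are stand-ins.
    model = torch.nn.Sequential(torch.nn.Linear(nx, nh), torch.nn.Tanh(), torch.nn.Linear(nh, ny))
    loss_fn = torch.nn.CrossEntropyLoss()

    Yhat = model(Xtrain[:Nbatch])                  # forward pass on one mini-batch
    L = loss_fn(Yhat, Ytrain[:Nbatch].argmax(1))   # targets converted to class indices
    L.backward()                                   # fills param.grad for every parameter
    sgd(model, eta)                                # manual update + zero_grad, defined above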
Code Example #4
File: circles.py  Project: yamidevs/rdfia
def sgd(params, grads, eta):
    # TODO: update the contents of params

    params["Wy"] -= grads["Wy"] * eta
    params["by"] -= grads["by"] * eta
    params["Wh"] -= grads["Wh"] * eta
    params["bh"] -= grads["bh"] * eta

    return params


if __name__ == '__main__':

    # init
    data = CirclesData()
    # data.plot_data()
    N = data.Xtrain.shape[0]
    Nepoch = 1500  # 3000
    Nbatch = 10
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    eta = 0.2
    print('Data set size: ', N)  # 200 here
    params = init_params(nx, nh, ny)
    printInterval = 100
    trainLosses = []
    testLosses = []

    # TODO: training loop
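    # A sketch of what the TODO training loop might look like, under the assumption that
    # forward, loss_accuracy, and backward have the signatures used in Code Example #6.
    for epoch in range(Nepoch):
        for j in range(0, N, Nbatch):
            Xbatch = data.Xtrain[j:j + Nbatch]
            Ybatch = data.Ytrain[j:j + Nbatch]
            Yhat, outs = forward(params, Xbatch)        # forward pass
            L, acc = loss_accuracy(Yhat, Ybatch)        # loss and accuracy on the batch
            grads = backward(params, outs, Ybatch)      # gradients w.r.t. the parameters
            params = sgd(params, grads, eta)            # SGD update (function above)
        if epoch % printInterval == 0:
            Yhat_test, _ = forward(params, data.Xtest)
            L_test, acc_test = loss_accuracy(Yhat_test, data.Ytest)
            trainLosses.append(float(L))
            testLosses.append(float(L_test))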
Code Example #5
File: circlesV2.py  Project: yamidevs/rdfia
        params['Wh'] -= params['Wh'].grad * eta
        params['bh'] -= params['bh'].grad * eta

        # reset the gradient accumulators to zero
        params['Wy'].grad.zero_()
        params['by'].grad.zero_()
        params['Wh'].grad.zero_()
        params['bh'].grad.zero_()

    return params


if __name__ == '__main__':

    # init
    data = CirclesData()
    # data.plot_data()
    N = data.Xtrain.shape[0]
    Nepoch = 1500  # 3000
    Nbatch = 10
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    eta = 0.2
    print('Data set size: ', N)  # 200 here
    params = init_params(nx, nh, ny)
    printInterval = 100
    trainLosses = []
    testLosses = []

    # TODO: training loop
Code Example #6
    return grads


def sgd(params, grads, eta):
    with torch.no_grad():
        for k in params.keys():
            print("nanaanana : ", k, grads[k])
            params[k] -= eta * grads[k]

    return params


if __name__ == '__main__':

    # init
    data = CirclesData()
    #data.plot_data()
    N = data.Xtrain.shape[0]
    Nbatch = 10
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    eta = 0.03

    # First tests, code to be modified
    params = init_params(nx, nh, ny)
    Yhat, outs = forward(params, data.Xtrain)
    L, _ = loss_accuracy(Yhat, data.Ytrain)
    grads = backward(params, outs, data.Ytrain)
    params = sgd(params, grads, eta)
Code Example #7
    #print(grads)
    return grads


def sgd(params, grads, eta):
    # TODO: update the contents of params
    with torch.no_grad():
        for key in params.keys():
            params[key] -= eta * grads[key]
    return params


if __name__ == '__main__':

    # init
    data = CirclesData()
    #data.plot_data()
    N = data.Xtrain.shape[0]
    Nbatch = 20
    nx = data.Xtrain.shape[1]
    nh = 10
    ny = data.Ytrain.shape[1]
    eta = 0.003
    index = np.arange(len(data.Xtrain))
    np.random.shuffle(index)
    data.Xtrain = data.Xtrain[index]
    data.Ytrain = data.Ytrain[index]
    # First tests, code to be modified
    params = init_params(nx, nh, ny)
    """Yhat, outs = forward(params, data.Xtrain.double())
    L, _ = loss_accuracy(Yhat.double(), data.Ytrain.long())
Code Example #8
File: test.py  Project: keyber/RDFIA
# Loading the class
from tme5 import CirclesData  # import the provided class
data = CirclesData()  # instantiate the provided class
# Accessing the data
Xtrain = data.Xtrain  # torch.Tensor containing the network inputs for training
print(Xtrain.shape)  # prints the data size: torch.Size([200, 2])
N = Xtrain.shape[0]  # number of examples
nx = Xtrain.shape[1]  # input dimensionality
# available data: data.Xtrain, data.Ytrain, data.Xtest, data.Ytest, data.Xgrid


# Plotting functions
data.plot_data()  # plots the train and test points
# Ygrid = forward(params, data.Xgrid)  # compute the predictions Y for every point of the grid
# (forward and params are not provided, to be coded)
# data.plot_data_with_grid(Ygrid)  # plots the points and the decision boundary using the grid
# data.plot_loss(loss_train, loss_test, acc_train, acc_test)  # plots the train/test loss and accuracy curves.
# The values to pass are scalars; they are stored for you,
# just pass the new values at each iteration
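# A minimal sketch of the commented-out plotting calls in action, using a throwaway
# torch.nn model in place of forward/params (an assumption, mirroring Code Example #1).
import torch
model = torch.nn.Sequential(torch.nn.Linear(nx, 10), torch.nn.Tanh(), torch.nn.Linear(10, data.Ytrain.shape[1]))
Ygrid = torch.nn.Softmax(dim=1)(model(data.Xgrid))  # class probabilities on the grid
data.plot_data_with_grid(Ygrid.detach())            # decision boundary (untrained model here)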
Code Example #9
File: circles_V3.py  Project: ykrmm/RDFIA
        else:
            self.data = data.Xtest
            labels = data.Ytest

        _, self.labels = torch.max(labels, 1)  # targets must be class indices, not one-hot vectors

    def __getitem__(self, index):
        return self.data[index], self.labels[index]

    def __len__(self):
        return len(self.labels)


if __name__ == '__main__':

    # data circle
    data_circle = CirclesData()
    data_circle.plot_data()
    N = data_circle.Xtrain.shape[0]
    nx = data_circle.Xtrain.shape[1]
    nh = 10
    ny = data_circle.Ytrain.shape[1]


    # model 
    model = Circle_Model(nx, nh, ny)


    # dataloader
    batch_size = 50
    shuffle_dataset = True    
    dataset_train = Circle_Dataset(data_circle)
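    # A minimal sketch (an assumption) of how the DataLoader would be built from the dataset above.
    from torch.utils.data import DataLoader

    loader_train = DataLoader(dataset_train, batch_size=batch_size, shuffle=shuffle_dataset)
    for X, y in loader_train:   # X: [batch_size, nx] inputs, y: class indices
        print(X.shape, y.shape)
        break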
Code Example #10
# import torch

from tme5 import CirclesData  # import de la classe
from circles import init_params, forward, loss_accuracy, backward, sgd

if __name__ == '__main__':
    # Loading the class
    data = CirclesData()  # instantiate the provided class

    # Accessing the data
    Xtrain = data.Xtrain  # torch.Tensor containing the network inputs for training
    # print('Xtrain.shape: ', Xtrain.shape)  # prints the data size: torch.Size([200, 2])
    # N = Xtrain.shape[0]  # number of examples

    nx = Xtrain.shape[1]  # input dimensionality
    # available data: data.Xtrain, data.Ytrain, data.Xtest, data.Ytest, data.Xgrid
    # Plotting functions
    # data.plot_data()  # plots the train and test points

    # Test of init_params
    # nh = 4
    # n = torch.distributions.Normal(torch.tensor(0.0), torch.tensor(0.3))
    # wh = n.sample((nh,))
    # print('wh: ', wh.shape, '\n', wh)

    Ytrain = data.Ytrain
    # print(Xtrain[0])
    # print('Ytrain: ', Ytrain.shape)
    # print(Ytrain)
    nh = 6
    ny = 2
    params = init_params(nx, nh, ny)
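# For reference, a plausible init_params consistent with the commented-out test above.
# Assumptions: weights drawn from a zero-mean Gaussian with std 0.3, biases set to zero,
# and the Wh/bh/Wy/by key names used in the other examples; the weight shapes are also a
# guess, since the real implementation lives in circles.py, which is not shown here.
import torch

def init_params_sketch(nx, nh, ny):
    return {
        "Wh": torch.randn(nh, nx) * 0.3,  # hidden-layer weights
        "bh": torch.zeros(nh),            # hidden-layer biases
        "Wy": torch.randn(ny, nh) * 0.3,  # output-layer weights
        "by": torch.zeros(ny),            # output-layer biases
    }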