Example #1
import time

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision.datasets import ImageFolder
from tqdm import tqdm

# `conf` (the project config object), `Network`, and `transform` (the
# torchvision preprocessing pipeline) are assumed to be defined elsewhere
# in this module / project.


def train(**kwargs):
    # Override default config values with the keyword arguments passed in.
    conf.parse(kwargs)

    train_set = ImageFolder(conf.TRAIN_DATA_ROOT, transform)
    train_loader = DataLoader(train_set, conf.BATCH_SIZE,
                              shuffle=True,
                              num_workers=conf.NUM_WORKERS)

    device = torch.device('cuda:0' if conf.USE_GPU else 'cpu')

    model = Network()
    if conf.LOAD_MODEL_PATH:
        print(conf.LOAD_MODEL_PATH)
        # Resume from a checkpoint; map_location keeps CPU-only machines working.
        model.load_state_dict(torch.load(conf.CHECKPOINTS_ROOT + conf.LOAD_MODEL_PATH,
                                         map_location=device))
    model.to(device)

    criterion = nn.CrossEntropyLoss().to(device)
    lr = conf.LEARNING_RATE
    optim = torch.optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=conf.WEIGHT_DECAY)

    for epoch in range(conf.MAX_EPOCH):

        model.train()
        running_loss = 0.0
        # Wrapping the loader (rather than enumerate) gives tqdm a known length.
        for step, (inputs, targets) in enumerate(tqdm(train_loader)):

            inputs, targets = inputs.to(device), targets.to(device)
            # Forward pass, loss, backward pass, parameter update.
            optim.zero_grad()
            outs = model(inputs)
            loss = criterion(outs, targets)
            loss.backward()
            optim.step()

            running_loss += loss.item()
            # Report the average training loss every PRINT_FREQ steps.
            if step % conf.PRINT_FREQ == conf.PRINT_FREQ - 1:
                running_loss = running_loss / conf.PRINT_FREQ
                print('[%d, %5d] loss: %.3f' % (epoch + 1, step + 1, running_loss))
                # vis.plot('loss', running_loss)
                running_loss = 0.0

        # Save a timestamped checkpoint at the end of every epoch.
        torch.save(model.state_dict(), conf.CHECKPOINTS_ROOT + time.strftime('%Y-%m-%d-%H-%M-%S.pth'))

        # Decay the learning rate once per epoch and apply it to every parameter group.
        lr *= conf.LEARNING_RATE_DECAY
        for param_group in optim.param_groups:
            param_group['lr'] = lr
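
# A minimal sketch of how this training entry point might be invoked. It
# assumes conf.parse() simply copies keyword overrides onto the conf object;
# the attribute names below are illustrative, not taken from the real config.
if __name__ == '__main__':
    train(MAX_EPOCH=10,
          BATCH_SIZE=64,
          USE_GPU=True)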
# -*- coding: utf-8 -*-

import os

from config import Config

from model.data import Data
from model.network import Network

if __name__ == '__main__':
    # Resolve the model's base directory relative to this file.
    dir_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            Config.CURRENT_MODEL_BASE_PATH)
    # Load the preprocessed dataset, then train the network, writing logs
    # under log.nosync/network/run1.
    data = Data()
    data.load_data_from_file(os.path.join(dir_path,
                                          'data.nosync/all_data.npy'))
    model = Network(data, os.path.join(dir_path, 'log.nosync/network/run1'))
    model.train()
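
# The Config class itself is not shown. A minimal sketch of what this script
# assumes it provides (the path value below is purely illustrative):
#
#     class Config:
#         # Directory, relative to this file, that contains the current
#         # model's data.nosync/ and log.nosync/ folders.
#         CURRENT_MODEL_BASE_PATH = 'models/current_model'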
class LogisticRegression(Classifier):
    """
    A digit-7 recognizer based on the logistic regression algorithm.

    Parameters
    ----------
    train : list
        Training data set.
    valid : list
        Validation data set.
    test : list
        Test data set.
    learningRate : float
        Step size used for the weight updates.
    epochs : positive int
        Number of passes over the training set.

    Attributes
    ----------
    trainingSet : list
    validationSet : list
    testSet : list
    weight : list
    learningRate : float
    epochs : positive int
    """

    def __init__(self, train, valid, test, learningRate=0.1, epochs=50):

        self.learningRate = learningRate
        self.epochs = epochs

        self.trainingSet = train
        self.validationSet = valid
        self.testSet = test
        self.network = Network(learningRate)
        
    def train(self, verbose=True):
        """Train the Logistic Regression.

        Parameters
        ----------
        verbose : boolean
            Print logging messages with validation accuracy if verbose is True.
        """
        # Run `epochs` passes over the training set, delegating the actual
        # weight updates to the underlying network.
        for epoch in range(self.epochs):
            self.network.train(self.trainingSet.input, self.trainingSet.label)
            if verbose:
                # Validation accuracy; assumes validationSet exposes .input
                # and .label in the same way trainingSet does.
                predictions = self.evaluate(self.validationSet.input)
                correct = sum(int(p == l) for p, l in
                              zip(predictions, self.validationSet.label))
                accuracy = correct / float(len(predictions))
                print("Epoch {}/{} - validation accuracy: {:.2%}".format(
                    epoch + 1, self.epochs, accuracy))

    def classify(self, testInstance):
        """Classify a single instance.

        Parameters
        ----------
        testInstance : list of floats

        Returns
        -------
        bool :
            True if the testInstance is recognized as a 7, False otherwise.
        """
        # The network outputs the probability of the instance being a 7;
        # threshold it at 0.5 to get a binary decision.
        return self.network.classify(testInstance) > 0.5

    def evaluate(self, test=None):
        """Evaluate a whole dataset.

        Parameters
        ----------
        test : list, optional
            The dataset to be classified. If omitted, the test set
            associated with the classifier is used.

        Returns
        -------
        list :
            List of classified decisions for the dataset's entries.
        """
        if test is None:
            test = self.testSet.input
        # Classify every instance in the dataset.
        return list(map(self.classify, test))
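
# A minimal usage sketch, assuming the exercise framework supplies data set
# objects that expose `.input` and `.label` (as used above) and that `Network`
# and `Classifier` are importable; `train_set`, `valid_set`, and `test_set`
# are illustrative names for those loaded data sets.
if __name__ == '__main__':
    classifier = LogisticRegression(train_set, valid_set, test_set,
                                    learningRate=0.05, epochs=30)
    classifier.train(verbose=True)        # prints validation accuracy per epoch
    predictions = classifier.evaluate()   # list of True/False decisions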