Example #1
0
File: har.py  Project: ropok/skripsi-har
def main(train_dataset=None, validation_dataset=None, test_dataset=None, epoch=30, batch_size=128,
         logdir=None, run=1, variation=1, checkpoint=None):
    """Train and evaluate the HAR `ConvLSTM` model.

    Iterates over `variation` hyperparameter configurations produced by
    `convlstm_hyperparamter`. For each configuration, the model is trained on
    `train_dataset`, validated on `validation_dataset`, and tested on
    `test_dataset`; any of these phases is skipped when its dataset path is
    not provided (or the loaded data is empty).

    Summaries and checkpoints for each configuration go into a numbered
    per-run directory under `logdir`, starting at `run`. Provide a path via
    `checkpoint` to restore weights before training or testing.

    Args:
        - `train_dataset`:      path to train dataset
        - `validation_dataset`: path to validation dataset
        - `test_dataset`:       path to test dataset
        - `epoch`:          number of epoch to train
        - `batch_size`:     mini batch size used for training
        - `logdir`:         path to save checkpoint and summary
        - `run`:            number of run for the first variation, used for log directory naming
        - `variation`:      number of hyperparameter variation
        - `checkpoint`:     checkpoint path to restore

    """
    for index, params in enumerate(convlstm_hyperparamter(variation)):
        # One log directory per configuration: run<N>, run<N+1>, ...
        current_logdir = os.path.join(logdir, 'run' + str(index + run))
        model = ConvLSTM(params, current_logdir)
        print('Run %d/%d' % (index + 1, variation))
        print_hyperparameter_notes(params)
        write_hyperparameter_notes(params, current_logdir)

        if train_dataset and validation_dataset:
            training = load(train_dataset, NUM_TARGET, WINDOW_SIZE)
            validation = load(validation_dataset, NUM_TARGET, WINDOW_SIZE)

            # Only train when both splits actually contain samples.
            if training.data.any() and validation.data.any():
                model.train(training, validation, epoch, batch_size, checkpoint)

        if test_dataset:
            testing = load(test_dataset, NUM_TARGET, WINDOW_SIZE)

            if testing.data.any():
                prediction = model.test(testing, batch_size, checkpoint)
                model.confusion_matrix(prediction, testing.target)

        # Clear the TF1 default graph so the next variation starts fresh.
        tf.reset_default_graph()
Example #2
0
# Loss, optimizer and LR schedule: Adam with MSE loss; the learning rate is
# halved (gamma=0.5) at each milestone epoch.
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                 milestones=opt.milestones,
                                                 gamma=0.5)

# CSV log recording per-epoch test MSE.
header = ['epoch/total_epoch', 'test_mse']
with open(test_mse_path, 'w') as testcsvmes:  # open the test-metrics CSV file
    writertest = csv.writer(testcsvmes)
    writertest.writerow(header)
    # training loop
    for epoch in range(1, opt.n_epoch + 1):
        print('\repoch {}'.format(epoch))
        # NOTE(review): calling scheduler.step() before the epoch's optimizer
        # updates follows the pre-1.1.0 PyTorch convention; on >=1.1.0 it
        # should come after the optimizer steps — confirm the target version.
        scheduler.step()
        print('*' * 10)
        model.train()  # switch to training mode (enables dropout/batch-norm updates)
        for i, (input, label) in enumerate(train_loader):
            # Convert the batch to float tensors, moving to GPU when available.
            if use_gpu:
                input = torch.FloatTensor(input).cuda()
                label = torch.FloatTensor(label).cuda()
            else:
                input = torch.FloatTensor(input)
                label = torch.FloatTensor(label)

            #label = label.squeeze()
            # forward pass
            pred = model(input)
            loss = criterion(pred, label)

            # backward pass (loss.backward()/optimizer.step() continue past this excerpt)
            optimizer.zero_grad()