Example #1
0
def eval_accuracy(net_path, name):
    """Evaluate a saved classifier on the UCR test split of *name*.

    Parameters
    ----------
    net_path : str
        Path to a whole-model checkpoint saved with ``torch.save(net, ...)``.
    name : str
        UCR dataset name; the test file is read from
        ``UCRArchive_2018/<name>/<name>_TEST.tsv``.

    Returns
    -------
    Whatever ``calculate_metrics(y_true, y_pred)`` produces
    (presumably a dict/Series of precision/accuracy/recall/F1 — see caller).
    """
    path = 'UCRArchive_2018/' + name + '/' + name + '_TEST.tsv'
    # map_location keeps a GPU-trained checkpoint loadable on a CPU-only host.
    # NOTE(review): torch.load unpickles arbitrary objects — only load trusted files.
    net = torch.load(net_path, map_location=device)
    net.eval()
    test_set, n_class = load_ucr(path)
    dataset = UcrDataset(test_set, channel_last=opt.channel_last)
    dataloader = UCR_dataloader(dataset, batch_size=128)

    y_pred = []
    y_true = []
    with torch.no_grad():  # pure inference: no autograd graph needed
        for data, label in dataloader:
            data = data.float().to(device)
            logits = net(data).cpu()
            y_pred.append(torch.argmax(logits, dim=1).numpy())
            y_true.append(label.view(-1).long().numpy())
    y_pred = np.concatenate(y_pred)
    y_true = np.concatenate(y_true)
    return calculate_metrics(y_true, y_pred)
Example #2
0
def train(name):
    """Train a classifier on UCR dataset *name* with DFT-based balanced
    augmentation, repeated ``opt.runs`` times, and write per-run plus
    mean/std metrics to ``report_metrics/.../metrics.csv``.
    """
    # Placeholder zero row at index 0; it is dropped before aggregating.
    # (np.float was removed in NumPy 1.24 — use the builtin float.)
    record = pd.DataFrame(data=np.zeros((1, 4), dtype=float),
                          columns=['precision', 'accuracy', 'recall', 'F1'])
    for _ in range(opt.runs):
        seed = random.randint(1, 10000)
        print("Random Seed: ", seed)
        torch.manual_seed(seed)

        # mkdirs for checkpoints output
        os.makedirs(opt.checkpoints_folder, exist_ok=True)
        os.makedirs('%s/%s' % (opt.checkpoints_folder, name), exist_ok=True)
        os.makedirs('report_metrics', exist_ok=True)

        root_dir = 'report_metrics/%s_aug_%s_IMBA/%s' % (
            opt.model, str(opt.n_group), name)
        os.makedirs(root_dir, exist_ok=True)

        # Load the training split (column 0 = label, rest = series values).
        path = 'UCRArchive_2018/' + name + '/' + name + '_TRAIN.tsv'
        train_set, n_class = load_ucr(path)

        print('启用平衡数据增强!')
        # Balanced augmentation: stratify by class, synthesize series in the
        # frequency domain, then train on original + augmented data.
        stratified_train_set = stratify_by_label(train_set)
        data_aug_set = data_aug_by_dft(stratified_train_set, opt.n_group)
        total_set = np.concatenate((train_set, data_aug_set))
        print('Shape of total set', total_set.shape)
        dataset = UcrDataset(total_set, channel_last=opt.channel_last)

        batch_size = int(min(len(dataset) / 10, 16))
        dataloader = UCR_dataloader(dataset, batch_size)

        # Common behavior
        seq_len = dataset.get_seq_len()  # input series length
        # Build classifier / loss / optimizer. An unknown model flag would
        # otherwise leave `net` undefined and crash later with a NameError.
        if opt.model == 'r':
            net = ResNet(n_in=seq_len, n_classes=n_class).to(device)
        elif opt.model == 'f':
            net = ConvNet(n_in=seq_len, n_classes=n_class).to(device)
        else:
            raise ValueError(
                "Unknown model flag: %r (expected 'r' or 'f')" % opt.model)
        criterion = nn.CrossEntropyLoss().to(device)
        optimizer = optim.Adam(net.parameters(), lr=opt.lr)
        scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                         mode='min',
                                                         factor=0.5,
                                                         patience=50,
                                                         min_lr=0.0001)

        # Start above any plausible loss so the first epoch always checkpoints.
        min_loss = float('inf')
        print('############# Start to Train ###############')
        net.train()
        for epoch in range(opt.epochs):
            for data, label in dataloader:
                data = data.float().to(device)
                label = label.long().to(device)
                optimizer.zero_grad()
                output = net(data)
                loss = criterion(output, label.view(label.size(0)))
                loss.backward()
                optimizer.step()
                # NOTE(review): stepping a plateau scheduler per *batch* is
                # unusual (patience=50 batches, not epochs) — kept to
                # preserve the original behavior; confirm intent.
                scheduler.step(loss)
            # Checkpoint on the last batch's loss of this epoch.
            if loss.item() < min_loss:
                min_loss = loss.item()
                print('MinLoss: %.10f Saving the best epoch model.....' %
                      min_loss)
                torch.save(
                    net, '%s/%s/%s_%s_best_IMBA.pth' %
                    (opt.checkpoints_folder, name, opt.model, str(
                        opt.n_group)))
        net_path = '%s/%s/%s_%s_best_IMBA.pth' % (opt.checkpoints_folder, name,
                                                  opt.model, str(opt.n_group))
        one_record = eval_accuracy(net_path, name)
        print('The minimum loss is %.8f' % min_loss)
        # DataFrame.append was removed in pandas 2.0; concat is the
        # supported equivalent (assumes one_record is a dict/Series row).
        record = pd.concat([record, pd.DataFrame([one_record])],
                           ignore_index=True)
    record = record.drop(index=[0])  # drop the zero placeholder row
    record.loc['mean'] = record.mean()
    record.loc['std'] = record.std()
    record.to_csv(root_dir + '/metrics.csv')

    print('\n')
Example #3
0
        # inverse the frequency domain into time domain with the same time step
        X_combined = np.fft.irfft(sampled_X_dft, n_time_steps)
        # add the corresponding label
        Y = Y[:sampled_X_dft.shape[0], np.newaxis]
        data_combined = np.concatenate((Y, X_combined), axis=1)
        # print('Shape of th combined data', data_combined.shape)
        data_augmented.append(data_combined)
    print('\n# Succeed to augment the data! #')
    data_augmented = np.concatenate(data_augmented)
    print('Shape of the augmented data', data_augmented.shape)
    return data_augmented


if __name__ == '__main__':
    # Demo: augment the FaceAll training split and overlay one original
    # series (blue) against one synthetic series (red).
    data_path = 'UCRArchive_2018/FaceAll/FaceAll_TRAIN.tsv'
    data, n_class = load_ucr(data_path)
    # Column 0 holds labels; the remaining columns hold the series values.
    x_ori = data[:, 1:]
    y_ori = data[:, 0]
    time_step = len(data[0]) - 1
    stratified_data = stratify_by_label(data)
    data_aug = data_aug_by_dft(
        stratified_data, n_group=4, train_size=len(data))
    # First augmented row, values only (label stripped).
    x = data_aug[0, 1:]
    plt.axis('off')
    plt.plot(stratified_data[0][4, 1:], color='b')
    plt.plot(x, color='r')
Example #4
0
from dft_aug import data_aug_by_dft
from utils import UcrDataset, UCR_dataloader, load_ucr, stratify_by_label
import numpy as np

# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Smoke-test the augmentation + dataloader pipeline on the Car dataset.
    path = 'UCRArchive_2018/Car/Car_TRAIN.tsv'
    data, n_class = load_ucr(path)
    print(data.shape)
    stratified_data = stratify_by_label(data)
    data_aug = data_aug_by_dft(stratified_data, ratio=0.4, n_group=4)
    # Train on the union of real and synthetic rows.
    data_ori_and_aug = np.concatenate((data, data_aug))
    dataset = UcrDataset(data_ori_and_aug)
    dataloader = UCR_dataloader(dataset, batch_size=148)
    # Print each batch's tensor shape and its labels.
    for data, label in dataloader:
        print(data.size())
        print(label)

# See PyCharm help at https://www.jetbrains.com/help/pycharm/