Beispiel #1
0
    def __init__(self, modelNet, model_name, axis='9axis'):
        """Prepare train/valid loaders and a private working copy of the net."""
        super(NN_train, self).__init__()
        self.model_name = model_name
        self.axis = axis

        train_set = ActionDataSets('train', axis)
        valid_set = ActionDataSets('valid', axis)

        # Wrap both splits in batched PyTorch loaders.
        self.action_train_data_gen = DataLoader(
            train_set, batch_size=32, shuffle=True, num_workers=2)
        self.action_valid_data_gen = DataLoader(
            valid_set, batch_size=32, shuffle=True, num_workers=2)
        print(
            f'train_data shape: ({len(train_set)}{(train_set.data_shape())})'
        )
        print(
            f'valid_data shape: ({len(valid_set)}{(valid_set.data_shape())})'
        )

        # Deep-copy so training never mutates the caller's network object.
        self.model = copy.deepcopy(modelNet)
        self.device = torch.device(
            'cuda:1' if torch.cuda.is_available() else 'cpu')
Beispiel #2
0
    def __init__(self, model, model_name, axis, data_category):
        """Restore trained weights and build a single-sample data loader."""
        super(Extract_1D_2D_features, self).__init__()

        self.model = model
        self.axis = axis
        self.data_category = data_category

        self.device = torch.device(
            'cuda:1' if torch.cuda.is_available() else 'cpu')

        # Checkpoint is loaded on CPU first, then moved if CUDA exists.
        state = torch.load(f'src/model/{model_name}_{axis}_model.pkl',
                           map_location='cpu')
        self.model.load_state_dict(state)
        if torch.cuda.is_available():
            self.model.to(self.device)
        self.model.eval()

        dataset = ActionDataSets(data_category, axis)
        # Loader over the requested split (default batch size of 1).
        self.action_train_data_gen = DataLoader(dataset, shuffle=True,
                                                num_workers=2)
        print(
            f'{data_category}data shape: ({len(dataset)}{(dataset.data_shape())})'
        )
    def __init__(self, modelNet, model_name, axis, data_category='other_test'):
        """Restore a trained net and hook its global-avgpool layer for features."""
        super(CF_features, self).__init__()
        self.model = modelNet
        self.axis = axis
        self.data_category = data_category

        # Load the saved weights on CPU, then move the net if CUDA exists.
        weights = torch.load(f'src/model/{model_name}_{axis}_model.pkl',
                             map_location='cpu')
        self.model.load_state_dict(weights)
        if torch.cuda.is_available():
            self.model.cuda()
        self.model.eval()
        self.model_name = model_name

        # Forward hook captures the global-avgpool activations of each pass.
        self.globel_avgpool_features = {}
        self.model.globel_avgpool.register_forward_hook(
            self.get_globel_avgpool_activation("globel_avgpool"))

        if data_category == 'other_test':
            # Extract CNN features for the other lab members' recordings.
            test_set = ActionTestDataSets(axis)
        else:
            # Extract train/test features for the athletes' recordings.
            test_set = ActionDataSets(data_category, axis)
        self.test_action_data_set = DataLoader(test_set,
                                               shuffle=True,
                                               num_workers=2)

        print(
            f'{self.data_category}_data shape: ({len(test_set)}{(test_set.data_shape())})'
        )
Beispiel #4
0
    def __init__(self, axis, data_category):
        """Build a single-sample loader over the raw action data."""
        super(Extract_origin_features, self).__init__()

        self.axis = axis
        self.data_category = data_category

        dataset = ActionDataSets(data_category, axis)
        # Single-sample PyTorch loader over the requested split.
        self.action_train_data_gen = DataLoader(dataset, shuffle=True,
                                                num_workers=2)
        print(
            f'{data_category}data shape: ({len(dataset)}{(dataset.data_shape())})'
        )
    def __init__(self, modelNet, model_name, axis, batch_size):
        """Hold an already-trained net and set up the test data loader."""
        super(NN_Predict, self).__init__()
        self.model = modelNet
        self.axis = axis
        self.device = torch.device(
            'cuda:1' if torch.cuda.is_available() else 'cpu')
        self.batch_size = batch_size
        # No checkpoint load here: the caller passes in a trained model.
        if torch.cuda.is_available():
            self.model.to(self.device)
        self.model.eval()

        self.model_name = model_name

        test_set = ActionDataSets('test', axis)
        # Single-sample loader over the test split.
        self.test_action_data_set = DataLoader(test_set,
                                               shuffle=True,
                                               num_workers=0)
        print(
            f'test_data shape: ({len(test_set)}{(test_set.data_shape())})'
        )
Beispiel #6
0
    def __init__(self, modelNet, model_name, axis):
        """Restore saved weights into *modelNet* and build the test loader."""
        super(NN_Predict, self).__init__()
        self.model = modelNet
        self.axis = axis
        self.device = torch.device(
            'cuda:1' if torch.cuda.is_available() else 'cpu')

        # Checkpoint is read on CPU first, then the net moves if CUDA exists.
        checkpoint = torch.load(f'src/model/{model_name}_{axis}_model.pkl',
                                map_location='cpu')
        self.model.load_state_dict(checkpoint)
        if torch.cuda.is_available():
            self.model.to(self.device)
        self.model.eval()

        self.model_name = model_name

        test_set = ActionDataSets('test', axis)
        # Single-sample loader over the test split.
        self.test_action_data_set = DataLoader(test_set,
                                               shuffle=True,
                                               num_workers=0)
        print(
            f'test_data shape: ({len(test_set)}{(test_set.data_shape())})'
        )
    def train(self):
        """Run one full training pass per entry in ``all_batch_size``.

        For each run: rebuild the datasets and loaders, start from a fresh
        deep copy of the original network, train for a fixed number of
        epochs with Adam + StepLR, record loss/accuracy curves, run the
        test-set predictor, save the trained weights, and finally draw all
        recorded curves into one comparison figure.

        Fixes vs. the previous revision:
        * bare ``axis`` / ``model_name`` (undefined locals) replaced with
          ``self.axis`` / ``self.model_name``, matching their use elsewhere
          in this method;
        * invalid matplotlib color name ``'darkmagen'`` corrected to
          ``'darkmagenta'``;
        * the triplicated checkpoint path hoisted into one variable.
        """
        # all_batch_size = [32, 64, 128, 256]
        # NOTE(review): with four identical batch sizes the f-string dict
        # keys below collide, so only the last run's curves survive into the
        # final plot — confirm whether the runs were meant to be distinct.
        all_batch_size = [64, 64, 64, 64]
        train_loss_dict = {}
        valid_loss_dict = {}
        right_ratio_dict = {}

        for batch_index, batch_size in enumerate(all_batch_size):
            since = time.time()

            action_data_train_set = ActionDataSets('train', self.axis)
            action_data_valid_set = ActionDataSets('valid', self.axis)
            # Batched PyTorch loaders for this run's batch size.
            self.action_train_data_gen = DataLoader(
                action_data_train_set,
                batch_size=batch_size,
                shuffle=True,
                num_workers=2)
            self.action_valid_data_gen = DataLoader(
                action_data_valid_set,
                batch_size=batch_size,
                shuffle=True,
                num_workers=2)
            print(
                f'train_data shape: ({len(action_data_train_set)}{(action_data_train_set.data_shape())})'
            )
            print(
                f'valid_data shape: ({len(action_data_valid_set)}{(action_data_valid_set.data_shape())})'
            )

            # Start every run from a pristine copy of the original network.
            # NOTE(review): relies on ``self.modelNet`` existing — the
            # __init__ shown above only sets ``self.model``; verify.
            self.model = copy.deepcopy(self.modelNet)
            self.device = torch.device(
                'cuda:1' if torch.cuda.is_available() else 'cpu')

            model_ft = self.model

            if torch.cuda.is_available():
                model_ft = model_ft.to(self.device)  # run training on the GPU

            dataset_sizes = {
                'train': len(self.action_train_data_gen.dataset),
                'valid': len(self.action_valid_data_gen.dataset)
            }
            dataloaders = {
                'train': self.action_train_data_gen,
                'valid': self.action_valid_data_gen
            }

            # Loss function, optimizer and LR schedule for this run.
            num_epochs = 60
            criterion = nn.CrossEntropyLoss()
            # optimizer_ft = optim.SGD(model_ft.parameters(), lr=0.1, momentum=0.9, weight_decay=0.01)
            optimizer_ft = optim.Adam(model_ft.parameters(),
                                      lr=0.001,
                                      weight_decay=0.10)
            exp_lr_scheduler = optim.lr_scheduler.StepLR(optimizer_ft,
                                                         step_size=20,
                                                         gamma=0.1)

            # best_model_wts = self.model.state_dict()
            best_model_wts = copy.deepcopy(model_ft.state_dict())
            best_acc = 0.0
            train_loss = []
            valid_loss = []
            right_ratio = []  # validation accuracy per epoch

            for epoch in range(1, num_epochs + 1):
                if epoch % 10 == 0:
                    print('-' * 30)
                    print('{}-{}-{},Epoch {}/{} '.format(
                        self.model_name, self.axis, batch_size, epoch,
                        num_epochs))
                    print(f"the lr is :{optimizer_ft.param_groups[0]['lr']}")

                # Each epoch has a training phase and a validation phase.
                for phase in ['train', 'valid']:
                    if phase == 'train':
                        model_ft.train(True)
                    else:
                        model_ft.eval()

                    running_loss = 0.0
                    running_corrects = 0

                    for i, data in enumerate(dataloaders[phase]):
                        inputs, labels = data
                        # Move the batch to the training device if available.
                        if torch.cuda.is_available():
                            inputs = inputs.to(self.device)
                            labels = labels.to(self.device)

                        # Forward pass.
                        outputs = model_ft(inputs)
                        _, preds = torch.max(outputs.data, 1)
                        loss = criterion(outputs, labels)
                        # Clear accumulated gradients.
                        optimizer_ft.zero_grad()

                        # Backprop + step only during training.
                        if phase == 'train':
                            loss.backward()
                            optimizer_ft.step()
                        # Accumulate statistics.
                        running_loss += loss.item()
                        running_corrects += torch.sum(preds == labels.data)

                    # Mean loss per batch; accuracy per sample.
                    epoch_loss = running_loss / len(
                        dataloaders[phase])  # dataset_sizes[phase]
                    epoch_acc = running_corrects.item() / dataset_sizes[phase]

                    # Record the curves for this phase.
                    if phase == 'train':
                        train_loss.append(epoch_loss)
                    else:
                        valid_loss.append(epoch_loss)
                        right_ratio.append(epoch_acc)
                    if epoch % 10 == 0:
                        print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                            phase, epoch_loss, epoch_acc))

                    # Keep a deep copy of the best-so-far weights.
                    if phase == 'valid' and epoch_acc > best_acc:
                        best_acc = epoch_acc
                        # best_model_wts = model_ft.state_dict()
                        best_model_wts = copy.deepcopy(model_ft.state_dict())

                    if phase == 'train':
                        exp_lr_scheduler.step()

            time_elapsed = time.time() - since
            print('-' * 30)
            print('Training complete in {:.0f}m {:.0f}s'.format(
                time_elapsed // 60, time_elapsed % 60))
            print('Best val Acc: {:4f}'.format(best_acc))

            train_loss_dict[f'batch_size={batch_size}'] = train_loss
            valid_loss_dict[f'batch_size={batch_size}'] = valid_loss
            right_ratio_dict[f'batch_size={batch_size}'] = right_ratio

            # Evaluate the freshly trained model on the test split.
            nn_predict = NN_Predict(model_ft,
                                    self.model_name,
                                    axis=self.axis,
                                    batch_size=batch_size)
            with Timer() as t:
                nn_predict.predict()
            print('predict time {0}'.format(str(t.interval)[:5]))

            # load best model weights
            # model_ft.load_state_dict(best_model_wts)

            # Overwrite any previous checkpoint for this run.
            model_path = (
                f'src/rec_batch/model/{self.model_name}_{self.axis}_model_batch_size_{batch_size}_{batch_index}.pkl'
            )
            if os.path.exists(model_path):
                os.remove(model_path)
            torch.save(model_ft.state_dict(), model_path)
            self.plt_image(train_loss, valid_loss, right_ratio, batch_index)

        # All runs done — draw the combined comparison figure.
        colors = [
            'orange', 'darkorange', 'gold', 'yellowgreen', 'olivedrab',
            'chartreuse', 'lime', 'turquoise', 'lightseagreen', 'midnightblue',
            'navy', 'darkblue', 'purple', 'darkmagenta', 'fuchsia'
        ]
        color_index = 0
        plt.title(f'{self.model_name}_{self.axis} training')
        for batch_size_key in ['batch_size=' + str(x) for x in all_batch_size]:
            plt.plot(train_loss_dict[batch_size_key],
                     label=f'Train Loss,{batch_size_key}',
                     c=colors[color_index])
            color_index += 1
            plt.plot(valid_loss_dict[batch_size_key],
                     label=f'Valid Loss,{batch_size_key}',
                     c=colors[color_index])
            color_index += 1
            plt.plot(right_ratio_dict[batch_size_key],
                     label=f'Valid Accuracy,{batch_size_key}',
                     c=colors[color_index])
            color_index += 1
        plt.xlabel('epoch')
        plt.ylabel('Loss & Accuracy')
        plt.legend(loc='upper right')
        plt.savefig(
            f"src/rec_batch/plt_img/{self.model_name}_{self.axis}_train_loss_batch_index_64.png"
        )
        plt.show()
        plt.close()