Example #1
    def train_test(self, data, label):
        model = newCNN.Model(data.shape[1]).to(device)
        cost = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=0.00001,
                                     weight_decay=0.01)  # 0.00001,0.01
        # print("sz: ", self.train_size, self.test_size)
        train_batchs = self.train_size // batch_size
        test_batchs = self.test_size // batch_size
        data_test = torch.from_numpy(data[self.train_size:self.train_size +
                                          self.test_size]).to(device)
        label_test = torch.from_numpy(label[self.train_size:self.train_size +
                                            self.test_size]).to(device)
        model.train()
        for epoch in range(Epoch):
            train_indices = np.random.permutation(self.train_size)
            data_train = torch.from_numpy(data[train_indices]).to(device)
            label_train = torch.from_numpy(label[train_indices]).to(device)
            for i in range(train_batchs):
                inputs = Variable(
                    data_train[i *
                               batch_size:min((i + 1) *
                                              batch_size, self.train_size), :],
                    requires_grad=False).view(-1, 1, data.shape[1])
                targets = Variable(
                    label_train[i *
                                batch_size:min((i + 1) *
                                               batch_size, self.train_size)],
                    requires_grad=False)

                num = min(
                    (i + 1) * batch_size, self.train_size) - i * batch_size
                # skip the final batch if it is smaller than batch_size
                if num < batch_size:
                    continue

                outputs = model(inputs)
                _, pred = torch.max(outputs.data, 1)
                optimizer.zero_grad()
                loss = cost(outputs, targets)
                loss.backward()
                optimizer.step()

        model.eval()

        FP = 0
        TN = 0
        TP = 0
        FN = 0
        prediction = np.zeros(self.test_size, dtype=np.uint8)
        # prob = np.zeros(test_size)

        for i in range(test_batchs):
            inputs = Variable(
                data_test[i * batch_size:min((i + 1) *
                                             batch_size, self.test_size), :],
                requires_grad=False).view(-1, 1, data.shape[1])
            targets = Variable(
                label_test[i * batch_size:min((i + 1) *
                                              batch_size, self.test_size)],
                requires_grad=False)

            num = min((i + 1) * batch_size, self.test_size) - i * batch_size
            if num < batch_size:
                break

            outputs = model(inputs)

            pred = np.argmax(outputs.data.cpu().numpy(), axis=1)
            prediction[i * batch_size:min((i + 1) *
                                          batch_size, self.test_size)] = pred

            for j in range(len(pred)):
                if pred[j] == 0:
                    if targets[j] == 0:
                        TN = TN + 1
                    else:
                        FN += 1
                else:
                    if targets[j] == 0:
                        FP = FP + 1
                    else:
                        TP += 1

        # print("shape: ", label_test.shape, prediction.shape)
        # print("TN=", TN, "TP=", TP, "FP=", FP, "FN=", FN)
        lb_cpu = label_test.data.cpu().numpy()
        precision = precision_score(lb_cpu, prediction)
        recall = recall_score(lb_cpu, prediction)
        f1 = f1_score(lb_cpu, prediction)
        print("acc  = ", accuracy_score(lb_cpu, prediction))
        if TN + FP > 0:
            fpr = FP / (TN + FP)
        else:
            fpr = 0
        return f1, precision, recall, fpr
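
For reference, the manual FP/TN/TP/FN bookkeeping above can be cross-checked with sklearn's confusion_matrix, which yields the same false-positive rate directly. A minimal sketch (y_true and y_pred are placeholder arrays, not variables from the snippet):

import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.array([0, 0, 1, 1, 0, 1], dtype=np.uint8)  # ground-truth test labels
y_pred = np.array([0, 1, 1, 0, 0, 1], dtype=np.uint8)  # model predictions

# For binary labels {0, 1}, confusion_matrix returns [[TN, FP], [FN, TP]]
tn, fp, fn, tp = confusion_matrix(y_true, y_pred, labels=[0, 1]).ravel()
fpr = fp / (tn + fp) if (tn + fp) > 0 else 0.0
print("FPR =", fpr)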
Example #2
 def __init__(self):
     self.model = newCNN.Model()
     self.cost = torch.nn.CrossEntropyLoss()
     self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.00001, weight_decay=0.01)  # 0.00001,0.01
Example #3
 def __init__(self, sz=155, lr=0.001):
     self.sz = sz
     self.model = newCNN.Model(sz).to(device)
     self.cost = torch.nn.CrossEntropyLoss()
     self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr)
Example #4
        unlabeled_label = np.array([-1] * train_unlabeled_size)
        indices = np.random.permutation(train_size)

        all_train_label = torch.from_numpy((np.concatenate(
            (train_label,
             unlabeled_label)))[indices].astype(np.longlong)).to(device)
        all_train_data = torch.from_numpy((np.concatenate(
            (train_labeled_data, train_unlabeled_data)))[indices]).to(device)

        test_data = torch.from_numpy(test_data).to(device)
        test_label = torch.from_numpy(test_label)

        shape_1 = train_labeled_data.shape[1]

        step_counter = 0
        stu = newCNN.Model(shape_1).to(device)
        teacher = newCNN.Model(shape_1).to(device)
        for param in teacher.parameters():
            param.detach_()

        optimizer = torch.optim.Adam(stu.parameters())
        f1 = 0
        precision = 0
        recall = 0
        acc = 0
        for epoch in range(4):
            print("epoch: ", epoch)
            train(stu, teacher, all_train_data, all_train_label, optimizer,
                  epoch, step_counter)

            print("test....")
Example #5
File: run.py  Project: zyyrrr/FEAD
    print("the label size is", train_labeled_size)
    number = 10  # average over 10 runs
    F1s = []
    ACCs = []
    for _ in range(number):

        train_unlabeled_size = train_size - train_labeled_size

        train_labeled_data, train_label, train_unlabeled_data, test_data, test_label = getData(
            data_path, labels_path, train_labeled_size, train_unlabeled_size,
            test_size)
        test_data = torch.from_numpy(test_data).to(device)
        dataset_unlabeled = torch.utils.data.TensorDataset(
            torch.from_numpy(train_unlabeled_data))
        loader_labeled = DataLoad(train_labeled_data, train_label, batch_size)
        model = newCNN.Model(test_data.shape[1]).to(device)
        cost = torch.nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

        pre_train_epochs = 10
        train_epochs = 10

        # train model use labeled data
        print("pre-train")
        model.train()

        train_batch = train_labeled_size // batch_size

        for epoch in range(pre_train_epochs):
            running_correct = 0
            loss_sum = 0
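
The snippet is cut off inside the pre-training epoch, so the loop body is not shown. Purely as an orientation aid, here is a minimal sketch of a typical supervised step over a (data, label) loader that accumulates loss_sum and running_correct; this is an assumption about the general shape of such a loop, not the project's actual code:

for inputs, targets in loader_labeled:  # assumed to yield (data, label) batches
    inputs = inputs.view(-1, 1, inputs.shape[1]).to(device)
    targets = targets.to(device)

    outputs = model(inputs)
    loss = cost(outputs, targets)

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    loss_sum += loss.item()
    running_correct += (outputs.argmax(dim=1) == targets).sum().item()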