def showStudentData(self):
    """Look up a student record from the search box and display it.

    Reads the query string from ``self.search_input``; if the data layer
    returns no record, pops a warning dialog and returns early.
    Otherwise renders the normalized record and the student's rank as
    labels inside ``self.infor_frame``.
    """
    query = self.search_input.get()
    data = dt.getInfor(query, 'student')
    # The data layer signals "not found" with None — use an identity check.
    if data is None:
        tkm.showwarning(title='系统提示', message='未查找到任何信息')
        return
    # The query may be either a name or an id, so pass it as both.
    rank = dt.getRank(name=query, id=query)
    data = dt.normalization(data)
    infor_label = ttk.Label(self.infor_frame, text=data)
    rank_label = ttk.Label(self.infor_frame, text=' 排名\t' + str(rank))
    infor_label.grid(row=4, column=2, pady=10)
    rank_label.grid(row=5, column=2, sticky='w')
    def searchPage(self):
        """Rebuild the info panel with this user's record and rank."""
        # Tear down whatever page was shown before, then build a fresh frame.
        self.infor_frame.destroy()
        frame = ttk.Frame(self.root,
                          padding=(10, 10, 10, 10),
                          relief='sunken')
        self.infor_frame = frame
        frame.pack()

        # Fetch, rank and normalize the current user's record.
        data = dt.getInfor(self.id, self.user)
        rank = dt.getRank(id=self.id)
        data = dt.normalization(data)

        widgets = {
            'data': ttk.Label(frame, text=data),
            'rank': ttk.Label(frame, text='排名'),
            'rank_num': ttk.Label(frame, text=str(rank)),
            'back': ttk.Button(frame, text='返回', command=self.return_menu),
        }

        widgets['data'].grid(row=1, column=1)
        widgets['rank'].grid(row=2, column=1, sticky='w', pady=10)
        widgets['rank_num'].grid(row=2, column=2, sticky='n', pady=10)
        widgets['back'].grid(row=3, column=1)
# ---- Example #3 (scraped snippet marker, score: 0) ----
# Select GPU when available; model, loss and data all live on this device.
device = "cuda" if torch.cuda.is_available() else "cpu"
model = GCN_Net().to(device)
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(),
                       lr=learning_rate,
                       weight_decay=weight_decay)

# Citeseer node-classification data: features, labels and split masks.
dataset = dgl.data.CiteseerGraphDataset()
graph = dataset[0]
tensor_x = graph.ndata['feat'].to(device)
tensor_y = graph.ndata['label'].to(device)
tensor_train_mask = graph.ndata['train_mask'].to(device)
tensor_val_mask = graph.ndata['val_mask'].to(device)
tensor_test_mask = graph.ndata['test_mask'].to(device)
# NOTE(review): `graph` is rebound here from the DGL graph to the raw
# pickled adjacency data — confirm nothing later expects the DGL graph.
graph = read_data(osp.join("citeseer", "raw", "ind.citeseer.graph"))
normalize_adjacency = normalization(build_adjacency(graph))

# Convert the normalized adjacency (scipy COO) into a torch sparse tensor.
indices = torch.from_numpy(
    np.asarray([normalize_adjacency.row,
                normalize_adjacency.col]).astype('int64')).long()
values = torch.from_numpy(normalize_adjacency.data.astype(np.float32))
# torch.sparse.FloatTensor is deprecated; sparse_coo_tensor is the
# supported constructor (dtype inferred from `values`).  Derive the size
# from the feature matrix instead of hard-coding 3327.
num_nodes = tensor_x.shape[0]
tensor_adjacency = torch.sparse_coo_tensor(
    indices, values, (num_nodes, num_nodes)).to(device)

def train():
    """Train `model` on the Citeseer training split.

    Uses the module-level `model`, `tensor_y`, `tensor_train_mask` and
    `epochs`.  NOTE(review): this chunk is truncated — the body of the
    epoch loop is not visible here, so the per-epoch logic (loss step,
    validation accuracy) cannot be documented from this view.
    """
    loss_history = []      # per-epoch training loss (filled in the loop below)
    val_acc_history = []   # per-epoch validation accuracy
    model.train()
    # Labels restricted to the training nodes via the boolean mask.
    train_y = tensor_y[tensor_train_mask]
    for epoch in range(epochs):
# ---- Example #4 (scraped snippet marker, score: 0) ----
# Loss and optimizer for the externally defined `model`.
criterion = nn.CrossEntropyLoss().to(device)
optimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)

# Citeseer node-classification data: node features, labels and
# train/val/test split masks, all moved to `device`.
dataset = dgl.data.CiteseerGraphDataset()
graph = dataset[0]
tensor_x = graph.ndata['feat'].to(device)
tensor_y = graph.ndata['label'].to(device)
tensor_train_mask = graph.ndata['train_mask'].to(device)
tensor_val_mask = graph.ndata['val_mask'].to(device)
tensor_test_mask = graph.ndata['test_mask'].to(device)
# NOTE(review): `graph` is rebound here from the DGL graph to the raw
# pickled adjacency data — confirm later code expects the raw form.
graph = read_data(osp.join("citeseer", "raw", "ind.citeseer.graph"))
normalized_Laplacian = normalization(build_adjacency(graph))

# Normalized Laplacian (scipy COO) as a torch sparse tensor.
indices = torch.from_numpy(np.asarray([normalized_Laplacian.row,
                                       normalized_Laplacian.col]).astype('int64')).long()
values = torch.from_numpy(normalized_Laplacian.data.astype(np.float32))
# torch.sparse.FloatTensor is deprecated; torch.sparse_coo_tensor is the
# supported constructor and infers the float32 dtype from `values`.
Laplacian_tensor_ = torch.sparse_coo_tensor(indices, values,
                                            (ncount, ncount)).to(device)

# Sparse identity I_n built as a COO diagonal of ones, then converted the
# same way as the Laplacian above.
identity_list_ = [1 for i in range(ncount)]
identity_coo_ = sp.spdiags(identity_list_, diags=[0], m=ncount, n=ncount, format="coo")
indices = torch.from_numpy(np.asarray([identity_coo_.row,
                                       identity_coo_.col]).astype('int64')).long()
values = torch.from_numpy(identity_coo_.data.astype(np.float32))
identity_tensor_ = torch.sparse_coo_tensor(indices, values,
                                           (ncount, ncount)).to(device)
# Presumably the zeroth term of a polynomial (e.g. Chebyshev) filter,
# T0 = I — TODO confirm against the code that consumes poly_item1.
poly_item1 = 1 * identity_tensor_
def main(isplot=False):
    """Run `iteration` independent train/evaluate rounds on generated data.

    Each round regenerates the dataset, builds a fresh tanh MLP from the
    project's mini framework, trains it, and records train/test error
    rates plus training and prediction times; per-round figures and
    aggregate mean/std statistics are printed.

    :param isplot: if True plot the labels, predictions and errors of
        each round
    :return: None
    """
    # --------------------------------------------------------------------------------------------------
    # PARAMETERS
    # --------------------------------------------------------------------------------------------------
    nb_samples = 1000
    iteration = 10

    # Model
    input_dim = 2
    output_dim = 2
    hidden_dim = 25

    # Training
    nb_epochs = 200
    learning_rate = 1e-2
    batch_size = 10

    saved_train_error = []
    saved_train_time = []
    saved_test_error = []
    saved_prediction_time = []

    for i in range(iteration):
        print('\n------- ITERATION - %d -------' % (i + 1))
        # --------------------------------------------------------------------------------------------------
        # DATASET
        # --------------------------------------------------------------------------------------------------
        # Generate data
        train_input, train_label = generate_data(nb_samples)
        train_label = convert_to_one_hot_labels(train_label)

        # Keep the raw label vector around for the error-highlight plot.
        test_input, test_label_vector = generate_data(nb_samples)
        test_label = convert_to_one_hot_labels(test_label_vector)

        print('Training data dimension: ', train_input.size())
        print('Training labels dimension: ', train_label.size())

        # Normalize data
        train_input = normalization(train_input)
        test_input = normalization(test_input)

        # --------------------------------------------------------------------------------------------------
        # MODEL
        # --------------------------------------------------------------------------------------------------
        model = Sequential(Linear(input_dim, hidden_dim), Tanh(),
                           Linear(hidden_dim, hidden_dim), Tanh(),
                           Linear(hidden_dim, hidden_dim), Tanh(),
                           Linear(hidden_dim, output_dim))

        # Xavier initialization — params come in (weight, bias) pairs.
        # BUG FIX: the original reused `i` here, clobbering the iteration
        # counter; use a dedicated index instead.
        for p_idx in range(0, len(model.param()), 2):
            xavier_initialization(model.param()[p_idx][0],
                                  model.param()[p_idx + 1][0], 'relu')

        # --------------------------------------------------------------------------------------------------
        # TRAINING
        # --------------------------------------------------------------------------------------------------

        start_train_time = time.time()
        training(model,
                 train_input,
                 train_label,
                 batch_size=batch_size,
                 nb_epochs=nb_epochs,
                 lr=learning_rate)
        end_train_time = time.time()

        # ERROR (as a percentage of misclassified samples)
        train_error = compute_error(model, train_input, train_label,
                                    batch_size) / train_input.size(0) * 100
        saved_train_error.append(train_error)
        test_error = compute_error(model, test_input, test_label,
                                   batch_size) / test_input.size(0) * 100
        saved_test_error.append(test_error)

        # Prediction time, measured over full mini-batch passes
        start_pred_time = time.time()
        for batch in range(0, test_input.size(0), batch_size):
            prediction(model, (test_input.narrow(0, batch, batch_size)))
        end_pred_time = time.time()

        train_time = end_train_time - start_train_time
        saved_train_time.append(train_time)
        prediction_time = end_pred_time - start_pred_time
        saved_prediction_time.append(prediction_time)

        print('\nTrain error {:.02f}% --- Train time {:.02f}s '
              '\nTest error {:.02f}% --- Prediction time {:.08f}s'.format(
                  train_error, train_time, test_error, prediction_time))
        # --------------------------------------------------------------------------------------------------
        # PLOT
        # --------------------------------------------------------------------------------------------------
        if isplot:
            test_predictions = prediction(model, test_input)
            # Mark misclassified points with a third color index (-> 'r').
            prediction_errors = test_predictions != test_label_vector
            test_prediction_errors = test_predictions.clone()
            test_prediction_errors[prediction_errors] = 2

            plt.figure(figsize=(10, 3))
            plt.subplot(1, 3, 1)
            plt.scatter(test_input[:, 0],
                        test_input[:, 1],
                        s=5,
                        c=test_label_vector,
                        cmap=matplotlib.colors.ListedColormap(
                            ['deepskyblue', 'mediumblue']))
            plt.title('Labels', fontsize=10)
            plt.gca().set_aspect('equal', adjustable='box')
            plt.gca().axes.get_xaxis().set_ticks([])
            plt.gca().axes.get_yaxis().set_ticks([])

            plt.subplot(1, 3, 2)
            plt.scatter(test_input[:, 0],
                        test_input[:, 1],
                        s=5,
                        c=test_predictions,
                        cmap=matplotlib.colors.ListedColormap(
                            ['deepskyblue', 'mediumblue']))
            plt.title('Predictions', fontsize=10)
            plt.gca().set_aspect('equal', adjustable='box')
            plt.gca().axes.get_xaxis().set_ticks([])
            plt.gca().axes.get_yaxis().set_ticks([])

            plt.subplot(1, 3, 3)
            plt.scatter(test_input[:, 0],
                        test_input[:, 1],
                        s=5,
                        c=test_prediction_errors,
                        cmap=matplotlib.colors.ListedColormap(
                            ['deepskyblue', 'mediumblue', 'r']))
            plt.title('Errors', fontsize=10)
            plt.gca().set_aspect('equal', adjustable='box')
            plt.gca().axes.get_xaxis().set_ticks([])
            plt.gca().axes.get_yaxis().set_ticks([])
            plt.show()

    print('\nTRAIN: Mean {:.02f} --- Std {:.02f} --- time {:.02f}s '
          '\nTEST: Mean {:.02f}% --- Std {:.02f} --- time {:.08f}s'.format(
              torch.FloatTensor(saved_train_error).mean(),
              torch.FloatTensor(saved_train_error).std(),
              torch.FloatTensor(saved_train_time).mean(),
              torch.FloatTensor(saved_test_error).mean(),
              torch.FloatTensor(saved_test_error).std(),
              torch.FloatTensor(saved_prediction_time).mean()))
def main():
    """Run ten independent train/evaluate rounds of the PyTorch MLP on
    freshly generated data, then print mean/std of errors and timings."""
    # ---- hyper-parameters -------------------------------------------------
    nb_samples = 1000
    iteration = 10

    # Model sizes
    input_dim = 2
    output_dim = 2
    hidden_dim = 25

    # Training settings
    number_epochs = 200
    learning_rate = 1e-2
    mini_batch_size = 10

    saved_train_error, saved_test_error = [], []
    saved_train_time, saved_prediction_time = [], []

    for run in range(iteration):
        print('\n------- ITERATION - %d -------' % (run + 1))

        # ---- dataset: generate, one-hot encode, normalize -----------------
        train_input, train_label = generate_data(nb_samples)
        train_label = convert_to_one_hot_labels(train_label)
        test_input, test_label = generate_data(nb_samples)
        test_label = convert_to_one_hot_labels(test_label)

        print('Training data dimension: ', train_input.size())
        print('Training labels dimension: ', train_label.size())

        train_input = normalization(train_input)
        test_input = normalization(test_input)

        # ---- model: 3 hidden tanh layers ----------------------------------
        model = nn.Sequential(
            nn.Linear(input_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.Tanh(),
            nn.Linear(hidden_dim, output_dim),
        )
        model.apply(init_weights)  # Xavier initialization

        # ---- training -----------------------------------------------------
        start_train_time = time.time()
        training(model,
                 train_input,
                 train_label,
                 batch_size=mini_batch_size,
                 nb_epochs=number_epochs,
                 lr=learning_rate)
        train_time = time.time() - start_train_time

        # ---- error rates (percent of misclassified samples) ---------------
        train_error = compute_error(
            model, train_input, train_label,
            mini_batch_size) / train_input.size(0) * 100
        saved_train_error.append(train_error)
        test_error = compute_error(
            model, test_input, test_label,
            mini_batch_size) / test_input.size(0) * 100
        saved_test_error.append(test_error)

        # ---- prediction timing over mini-batch passes ---------------------
        start_pred_time = time.time()
        for offset in range(0, test_input.size(0), mini_batch_size):
            prediction(model, test_input.narrow(0, offset, mini_batch_size))
        prediction_time = time.time() - start_pred_time

        saved_train_time.append(train_time)
        saved_prediction_time.append(prediction_time)

        print('\nTrain error {:.02f}% --- Train time {:.02f}s '
              '\nTest error {:.02f}% --- Prediction time {:.08f}s'.format(
                  train_error, train_time, test_error, prediction_time))

    # ---- aggregate statistics over all runs -------------------------------
    train_err = torch.FloatTensor(saved_train_error)
    test_err = torch.FloatTensor(saved_test_error)
    print('\nTRAIN: Mean {:.02f} --- Std {:.02f} --- time {:.02f}s '
          '\nTEST: Mean {:.02f}% --- Std {:.02f} --- time {:.08f}s'.format(
              train_err.mean(),
              train_err.std(),
              torch.FloatTensor(saved_train_time).mean(),
              test_err.mean(),
              test_err.std(),
              torch.FloatTensor(saved_prediction_time).mean()))