Exemplo n.º 1
0
def train_humanities(data_path='./aiA/09118120徐浩卿/raw_humanities.csv',
                     model_path='./aiA/09118120徐浩卿/humanities.mod1'):
    """Train Model1 on the humanities dataset and save its weights.

    Reads student/university records from *data_path*, fits Model1 with
    plain SGD for two epochs on a 75/25 train/test split, reports the mean
    test loss, and writes the trained state_dict to *model_path*.

    :param data_path: CSV with stu_rank/stu_long/stu_lati input columns and
        uni_rank/uni_long/uni_lati target columns
    :param model_path: destination file for the trained state_dict
    """
    raw = pd.read_csv(data_path)
    X = raw[['stu_rank', 'stu_long', 'stu_lati']]
    y = raw[['uni_rank', 'uni_long', 'uni_lati']]
    # fixed random_state keeps the split reproducible across runs
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=0)

    trainset = UniversityDataset(X_train.join(y_train))
    testset = UniversityDataset(X_test.join(y_test))
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=32,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False)

    model1 = Model1().double().cuda()
    criterion = Model1Loss().cuda()
    optimizer = optim.SGD(model1.parameters(), lr=0.1)

    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # move the mini-batch onto the GPU
            inputs, labels = data
            inputs = inputs.cuda()
            labels = labels.cuda()

            # standard SGD step: clear grads, forward, backward, update
            optimizer.zero_grad()
            outputs = model1(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # print statistics every 2000 mini-batches
            running_loss += loss.item()
            if i % 2000 == 1999:
                print('[%d, %5d] loss: %.6f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

    print('Finished Training')
    # switch to inference mode in case Model1 contains train-only layers
    # (dropout/batch-norm) — assumes Model1 follows nn.Module conventions
    model1.eval()
    with torch.no_grad():  # no gradients needed during evaluation
        total_loss = 0.0
        n_batches = 0
        for (inputs, labels) in testloader:
            inputs = inputs.cuda()
            labels = labels.cuda()
            out = model1(inputs)
            # .item() keeps a plain float rather than a live GPU tensor
            total_loss += criterion(out, labels).item()
            n_batches += 1
    # average over batches: the raw sum of batch losses grows with the
    # test-set size and is not an MSE; guard against an empty loader
    mean_loss = total_loss / max(n_batches, 1)
    print('Test MSE: {}'.format(mean_loss))
    torch.save(model1.state_dict(), model_path)
    'decay': 0.01,
    'adjacency': adjacency,
    'baseline': baseline,
    'delta00': 3.12284148,
    'delta01': 0.16178544,
    'delta10': 2.56444489,
    'delta11': 0.29539388,
    'rho': 0.6907,
    'u': 89.9719,
    'vmin': 10,
    'beta': 8.05708084,
    'pi': 0.7955,
    'alpha': 23.94028007
}

# Instantiate one model per parameter scenario.
# NOTE(review): the "high" model is built from params1low and the "low"
# model from params1high — this looks like a swap; confirm against the
# parameter dict definitions before relying on these names.
model1_normal = Model1(parameters=params1)
model1_high = Model1(parameters=params1low)
model1_low = Model1(parameters=params1high)
# NOTE(review): the variable is named model2 but the class is Model4 —
# verify this pairing is intentional.
model2 = Model4(parameters=params2b)
model5 = Model5(parameters=params5)
#%% Gen Out flows function


def gen_outflows(n, model, t, dt, lob_init):
    """
    Generates sample of n outflows for the given model during [t, t+dt]

    :param n: number of outflows to generate
    :param model: model object to use for simulation
    :param t: beginning of time interval to measure outflows
    :param dt: end of time interval to measure outflows
Exemplo n.º 3
0
import torch
import pandas as pd
import numpy as np
import heapq
import os
from models import Model1, Model2, UniversityDataset

# Load the raw datasets used below: humanities/science/combined student
# records plus the (shrunken) university table.
data_humanities = pd.read_csv('./aiA/09118120徐浩卿/raw_humanities.csv')
data_science = pd.read_csv('./aiA/09118120徐浩卿/raw_science.csv')
data_all = pd.read_csv('./aiA/09118120徐浩卿/raw_all.csv')
data_university = pd.read_csv('./aiA/09118120徐浩卿/colle_shrink.csv')

# Restore the trained humanities model and switch it to inference mode.
# NOTE(review): torch.load without map_location assumes the checkpoint's
# original device is available — confirm for CPU-only environments.
model1_human = Model1().double()
model1_human.load_state_dict(torch.load('./aiA/09118120徐浩卿/humanities.mod1'))
model1_human.eval()
# Predict a target university (rank + coordinates) for every student row.
# NOTE(review): this result is a torch tensor, yet later code in this file
# indexes it with .iloc — verify the expected return type of Model1.
ideal_colle = model1_human(
    torch.tensor(data_humanities[['stu_rank', 'stu_long', 'stu_lati']].values))

# Empty accumulator for Model2's training rows: university rank, the
# longitude/latitude offsets relative to the student, and a label column.
model2_data_humanities = pd.DataFrame(
    columns=['uni_rank', 'long_diff', 'lati_diff', 'label'])
for i in range(len(data_humanities)):
    student = data_humanities.iloc[i]
    colle = ideal_colle.iloc[i]

    distances = []  # 用于存储每所可选大学与预测大学的距离
    better_schools = []
    for school in data_university:
        distances.append(
            torch.dist(
                torch.tensor(school[['uni_rank', 'uni_long', 'uni_lati']]),
Exemplo n.º 4
0
    'decay': 0.01,
    'adjacency': adjacency,
    'baseline': baseline,
    'delta00': 3.12284148,
    'delta01': 0.16178544,
    'delta10': 2.56444489,
    'delta11': 0.29539388,
    'rho': 0.6907,
    'u': 89.9719,
    'vmin': 10,
    'beta': 8.05708084,
    'pi': 0.7955,
    'alpha': 23.94028007
}

model1 = Model1(parameters=params1b)
model4 = Model4(parameters=params4)

# %% Examine Runtime scaling
# Measure how simulation wall-clock cost grows with the simulated horizon:
# for horizons from 60 s up to 10 h in 30-minute steps, run `num`
# simulations and record the average wall time per run.
simulator = Simulator(lob_init=lob, model=model1)
times = np.arange(60, 10 * 60 * 60, 30 * 60)
run_times = np.zeros(len(times))
num = 10  # simulations averaged at each horizon
for t in range(len(times)):
    start = timer.time()  # NOTE(review): presumably `import time as timer` — confirm
    res_model1 = simulator.run(n=num, obs_freq=0.5, run_time=times[t])
    run_times[t] = (timer.time() - start) / num
    print(t)  # progress indicator only

#%% run time plot
# Least-squares slope through the origin of run time vs. horizon (in
# minutes): beta = sum(y*x) / sum(x^2), i.e. seconds of wall time per
# simulated minute.
beta = sum(run_times * times / 60) / sum(np.power(times / 60, 2))
Exemplo n.º 5
0
    y = raw[['uni_rank', 'uni_long', 'uni_lati']]
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=0)

    trainset = UniversityDataset(X_train.join(y_train))
    testset = UniversityDataset(X_test.join(y_test))
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=32,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False)

    model1 = Model1().double().cuda()
    criterion = Model1Loss().cuda()
    optimizer = optim.SGD(model1.parameters(), lr=0.1)

    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs = inputs.cuda()
            labels = labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize