def list_and_count_test(n=10000):
    pk_list = [str(m.pk) for m in Model1.objects()]
    # warm-up: load every object once so caches are primed
    for k in pk_list:
        m = Model1.objects.get(pk=k)

    m = 0
    s = 0
    print('Count&List test (operations count: %d):' % n)
    start_time = time.time()
    for i in range(n):
        models = Model1.objects(created__lt=rand.choice(dates))
        # filter() returns a new queryset, so the result must be reassigned
        if rand.randint(0, 10) > 5:
            models = models.filter(volume__gt=rand.choice(val))
        if rand.randint(0, 10) > 5:
            models = models.filter(name__contains=rand.choice(chars))
        if models.count() > 0:
            models = models.limit(20)
            l = list(models)  # materialize the limited queryset
            m += models.count()
            for obj in l:
                if obj.pk:
                    s += sys.getsizeof(obj)

    print('time: %s' % str(time.time() - start_time))
    print('object count: %d' % m)
    print('total lists size %.2f mb' % (s / float(1024 ** 2)))
def create_models(n=10000, only=(1, 2, 3)):
    if 1 in only:
        print('creating %d Model1 objects' % n)
        for i in range(n):
            m = Model1(name=random_hex(),
                       volume=rand.randint(1, n),
                       created=random_date())
            m.save()

    k = Model1.objects().count()

    if 2 in only:
        print('creating %d Model2 objects' % n)

        for i in range(n):
            # pick a random existing Model1 to reference
            t = rand.randint(2, k - 2)
            m = Model2(name=random_hex(),
                       model1=Model1.objects()[t:t + 1][0],
                       count=rand.randint(1, n),
                       created=random_date())
            m.save()

    if 3 in only:
        print('creating %d Model3 objects' % n)
        for i in range(n):
            m = Model3(name=random_hex(),
                       model1=[],
                       count=rand.randint(1, n),
                       created=random_date())
            # attach 1-16 random Model1 references
            for j in range(rand.randint(1, 16)):
                t = rand.randint(2, k - 2)
                m.model1.append(Model1.objects()[t:t + 1][0])
            m.save()
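# ---------------------------------------------------------------------------
# Assumed scaffolding for the benchmark functions above. The snippet never
# shows its imports, the sample pools it draws from ('dates', 'val', 'chars'),
# or the 'random_hex'/'random_date' helpers, so the definitions below are a
# minimal sketch, not the original setup; Model1/Model2/Model3 are the ODM
# document classes under test and are likewise not shown here.
import sys
import time
import string
import random as rand
from datetime import datetime, timedelta

chars = list(string.ascii_lowercase)  # pool for name__contains filters
val = list(range(1, 10000))           # pool for volume__gt filters
dates = [datetime(2020, 1, 1) + timedelta(days=d)
         for d in range(365)]         # pool for created__lt filters


def random_hex(length=16):
    # random hex string used as a model name
    return ''.join(rand.choice('0123456789abcdef') for _ in range(length))


def random_date():
    # random creation date drawn from the same pool the queries use
    return rand.choice(dates)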
Example #3
def train_humanities():
    raw = pd.read_csv('./aiA/09118120徐浩卿/raw_humanities.csv')
    X = raw[['stu_rank', 'stu_long', 'stu_lati']]
    y = raw[['uni_rank', 'uni_long', 'uni_lati']]
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=0)

    trainset = UniversityDataset(X_train.join(y_train))
    testset = UniversityDataset(X_test.join(y_test))
    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=32,
                                              shuffle=True)
    testloader = torch.utils.data.DataLoader(testset,
                                             batch_size=32,
                                             shuffle=False)

    model1 = Model1().double().cuda()
    criterion = Model1Loss().cuda()
    optimizer = optim.SGD(model1.parameters(), lr=0.1)

    for epoch in range(2):
        running_loss = 0.0
        for i, data in enumerate(trainloader, 0):
            # get the inputs
            inputs, labels = data
            inputs = inputs.cuda()
            labels = labels.cuda()

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model1(inputs)
            loss = criterion(outputs, labels)
            loss.backward()
            #for parms in model1.parameters():
            #    print('-->grad_requirs:',parms.requires_grad,' -->grad_value:',parms.grad)
            optimizer.step()

            # print statistics
            running_loss += loss.item()
            if i % 2000 == 1999:  # print every 2000 mini-batches
                print('[%d, %5d] loss: %.6f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0

    print('Finished Training')
    with torch.no_grad():  # no gradients needed at test time
        loss = 0.0
        for (inputs, labels) in testloader:
            inputs = inputs.cuda()
            labels = labels.cuda()
            out = model1(inputs)
            loss += criterion(out, labels).item()
    print('Test loss (summed over batches): {}'.format(loss))
    torch.save(model1.state_dict(), './aiA/09118120徐浩卿/humanities.mod1')
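# 'UniversityDataset', 'Model1', and 'Model1Loss' come from the author's
# 'models' module and are not shown in this snippet. For orientation, a
# minimal dataset consistent with how it is used above -- joined
# student/university columns in, (inputs, labels) double tensors out --
# might look like the sketch below; the exact column split is an assumption.
import torch
from torch.utils.data import Dataset


class UniversityDatasetSketch(Dataset):
    def __init__(self, frame):
        # first three columns are the student features, last three the targets
        self.X = torch.tensor(frame[['stu_rank', 'stu_long', 'stu_lati']].values)
        self.y = torch.tensor(frame[['uni_rank', 'uni_long', 'uni_lati']].values)

    def __len__(self):
        return len(self.X)

    def __getitem__(self, idx):
        return self.X[idx], self.y[idx]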
def get_test(n=50000):
    pk_list = [str(m.pk) for m in Model1.objects()]
    # warm-up: load every object once so caches are primed
    for k in pk_list:
        m = Model1.objects.get(pk=k)

    print('Get test (operations count: %d):' % n)
    start_time = time.time()
    s = 0
    for i in range(n):
        k = rand.choice(pk_list)
        m = Model1.objects.get(pk=k)
        s += sys.getsizeof(m)

    print('time: %s' % str(time.time() - start_time))
    print('total lists size %.2f mb' % (s / float(1024 ** 2)))
def reference_list_test(n=1000):
    # warm-up: load every Model1 and Model3 object once
    pk_list = [str(m.pk) for m in Model1.objects()]
    for k in pk_list:
        m = Model1.objects.get(pk=k)
    pk_list = [str(m.pk) for m in Model3.objects()]
    for k in pk_list:
        m = Model3.objects.get(pk=k)

    print('Reference list test (operations count: %d):' % n)
    start_time = time.time()
    s = 0
    for i in range(n):
        k = rand.choice(pk_list)
        m = Model3.objects.get(pk=k)
        s += sys.getsizeof(m)
        # dereference every Model1 in the reference list
        for m1 in m.model1:
            s += sys.getsizeof(m1)

    print('time: %s' % str(time.time() - start_time))
    print('total lists size %.2f mb' % (s / float(1024 ** 2)))
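# Illustrative driver for the benchmarks above; the operation counts are
# arbitrary and the call order is an assumption.
if __name__ == '__main__':
    create_models(n=10000)            # populate Model1/Model2/Model3
    get_test(n=50000)                 # primary-key lookups
    list_and_count_test(n=10000)      # filtered list + count queries
    reference_list_test(n=1000)       # dereferencing reference lists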
params1 = {
    'decay': 0.01,
    'adjacency': adjacency,
    'baseline': baseline,
    'delta00': 3.12284148,
    'delta01': 0.16178544,
    'delta10': 2.56444489,
    'delta11': 0.29539388,
    'rho': 0.6907,
    'u': 89.9719,
    'vmin': 10,
    'beta': 8.05708084,
    'pi': 0.7955,
    'alpha': 23.94028007
}

model1_normal = Model1(parameters=params1)
model1_high = Model1(parameters=params1high)
model1_low = Model1(parameters=params1low)
model2 = Model4(parameters=params2b)
model5 = Model5(parameters=params5)
# %% Gen outflows function


def gen_outflows(n, model, t, dt, lob_init):
    """
    Generates sample of n outflows for the given model during [t, t+dt]

    :param n: number of outflows to generate
    :param model: model object to use for simulation
    :param t: beginning of time interval to measure outflows
    :param dt: length of the time interval (outflows are measured over [t, t+dt])
    :param lob_init: initial limit order book state (description assumed from the name)
    """
import torch
import pandas as pd
import numpy as np
import heapq
import os
from models import Model1, Model2, UniversityDataset

data_humanities = pd.read_csv('./aiA/09118120徐浩卿/raw_humanities.csv')
data_science = pd.read_csv('./aiA/09118120徐浩卿/raw_science.csv')
data_all = pd.read_csv('./aiA/09118120徐浩卿/raw_all.csv')
data_university = pd.read_csv('./aiA/09118120徐浩卿/colle_shrink.csv')

model1_human = Model1().double()
model1_human.load_state_dict(torch.load('./aiA/09118120徐浩卿/humanities.mod1'))
model1_human.eval()
with torch.no_grad():  # inference only, no gradients needed
    ideal_colle = model1_human(
        torch.tensor(data_humanities[['stu_rank', 'stu_long', 'stu_lati']].values))

model2_data_humanities = pd.DataFrame(
    columns=['uni_rank', 'long_diff', 'lati_diff', 'label'])

for i in range(len(data_humanities)):
    student = data_humanities.iloc[i]
    colle = ideal_colle[i]  # model output is a tensor, so index it directly

    distances = []  # distance from each candidate university to the predicted one
    better_schools = []
    # iterate over rows; iterating a DataFrame directly yields only column names
    for _, school in data_university.iterrows():
        distances.append(
            torch.dist(
                torch.tensor(
                    school[['uni_rank', 'uni_long', 'uni_lati']].values.astype(float)),
                colle))
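    # The snippet is truncated here. Given the otherwise-unused 'heapq' import
    # and the 'better_schools' list above, the lost continuation presumably
    # ranks candidates by distance; the lines below are an illustrative guess,
    # not the original code ('k_nearest' is an assumed parameter).
    k_nearest = 3
    nearest_idx = heapq.nsmallest(k_nearest, range(len(distances)),
                                  key=lambda j: distances[j].item())
    better_schools = [data_university.iloc[j] for j in nearest_idx]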
Example #8
params1b = {
    'decay': 0.01,
    'adjacency': adjacency,
    'baseline': baseline,
    'delta00': 3.12284148,
    'delta01': 0.16178544,
    'delta10': 2.56444489,
    'delta11': 0.29539388,
    'rho': 0.6907,
    'u': 89.9719,
    'vmin': 10,
    'beta': 8.05708084,
    'pi': 0.7955,
    'alpha': 23.94028007
}

model1 = Model1(parameters=params1b)
model4 = Model4(parameters=params4)

# %% Examine Runtime scaling
simulator = Simulator(lob_init=lob, model=model1)
times = np.arange(60, 10 * 60 * 60, 30 * 60)
run_times = np.zeros(len(times))
num = 10
for t in range(len(times)):
    start = timer.time()  # 'timer' is presumably the stdlib time module
    res_model1 = simulator.run(n=num, obs_freq=0.5, run_time=times[t])
    run_times[t] = (timer.time() - start) / num  # average wall time per run
    print(t)  # progress indicator

# %% Run time plot
# Least-squares slope of per-run wall time vs. simulated minutes, fit through the origin
beta = sum(run_times * times / 60) / sum(np.power(times / 60, 2))
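# The cell above is labelled as a plot, but no plotting code made it into the
# snippet; below is a minimal matplotlib sketch of the runtime-scaling figure
# using the arrays computed above (the labels and styling are assumptions).
import matplotlib.pyplot as plt

minutes = times / 60
plt.scatter(minutes, run_times, label='measured')
plt.plot(minutes, beta * minutes, label='fit: %.4f s per simulated minute' % beta)
plt.xlabel('simulated run time (minutes)')
plt.ylabel('wall-clock time per run (s)')
plt.legend()
plt.show()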