Example #1
def run_model(task=None, model_name=None, dataset_name=None, config_file=None,
              save_model=True, train=True, other_args=None):
    """
    Args:
        task(str): task name
        model_name(str): model name
        dataset_name(str): dataset name
        config_file(str): config filename used to modify the pipeline's
            settings. The config file should be JSON.
        save_model(bool): whether to save the model
        train(bool): whether to train the model
        other_args(dict): the rest of the args, which will be passed to the Config
    """
    # load config
    config = ConfigParser(task, model_name, dataset_name,
                          config_file, other_args)
    # logger
    logger = get_logger(config)
    logger.info('Begin pipeline, task={}, model_name={}, dataset_name={}'.
                format(str(task), str(model_name), str(dataset_name)))
    # load the dataset
    dataset = get_dataset(config)
    # transform the data and split it into train/valid/test sets
    train_data, valid_data, test_data = dataset.get_data()
    data_feature = dataset.get_data_feature()
    # disabled debug code (added by 18231216): dump each training batch's
    # tensors to oup.txt for inspection
    '''
    import torch
    towr = open("oup.txt", 'w')
    for i, element in enumerate(train_data):
        for key in element.data:
            print("key:{},body:{}".format(key, torch.Tensor(element.data[key])), file=towr)
            print("#####################################", file=towr)
    towr.close()
    '''
    # disabled debug code (added by 18231216): dump the shape of each field of
    # the raw dataset to oup.txt
    '''
    import torch
    towr = open("oup.txt", 'w')
    for key in dataset.data:
        print("key:{},shape:{}".format(key, torch.Tensor(dataset.data[key]).shape), file=towr)
    towr.close()
    '''
    # load the executor
    model_cache_file = './trafficdl/cache/model_cache/{}_{}.m'.format(
        model_name, dataset_name)
    model = get_model(config, data_feature)
    executor = get_executor(config, model)
    # train the model
    if train or not os.path.exists(model_cache_file):
        executor.train(train_data, valid_data)
        if save_model:
            executor.save_model(model_cache_file)
    else:
        executor.load_model(model_cache_file)
    # evaluate; the results will be saved under cache/evaluate_cache
    executor.evaluate(test_data)
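
A minimal usage sketch of the run_model pipeline above. The call itself is hypothetical; the task, model and dataset names are borrowed from the other examples on this page.

# hypothetical invocation of the run_model function defined above; the names
# 'traffic_state_pred', 'DGFN' and 'metr_la' are taken from Example #3
run_model(task='traffic_state_pred',
          model_name='DGFN',
          dataset_name='metr_la',
          config_file=None,
          save_model=True,
          train=True,
          other_args={'batch_size': 2})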
Example #2
def run_model(task=None,
              model_name=None,
              dataset_name=None,
              config_file=None,
              save_model=True,
              train=True,
              other_args=None):
    """
    Args:
        task (str): task name
        model_name (str): model name
        dataset_name (str): dataset name
        config_file (str): config filename used to modify the pipeline's
            settings. The config file should be JSON.
        save_model (bool): whether to save the model
        train (bool): whether to train the model
        other_args (dict): the rest of the args, which will be passed to
            the Config
    """

    # load config
    config = ConfigParser(task, model_name, dataset_name, config_file,
                          other_args)
    # logger
    logger = get_logger(config)
    logger.info(
        'Begin pipeline, task={}, model_name={}, dataset_name={}'.format(
            str(task), str(model_name), str(dataset_name)))
    # load the dataset
    dataset = get_dataset(config)
    # transform the data and split it into train/valid/test sets
    train_data, valid_data, test_data = dataset.get_data()
    data_feature = dataset.get_data_feature()
    # load the executor
    model_cache_file = './trafficdl/cache/model_cache/{}_{}.m'.format(
        model_name, dataset_name)
    model = get_model(config, data_feature)
    executor = get_executor(config, model)
    # train the model
    if train or not os.path.exists(model_cache_file):
        executor.train(train_data, valid_data)
        if save_model:
            executor.save_model(model_cache_file)
    else:
        executor.load_model(model_cache_file)
    # evaluate; the results will be saved under cache/evaluate_cache
    executor.evaluate(test_data)
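
A short sketch of checking that an other_args entry overrides the default configuration. Dict-style access on the config object is assumed from the config['device'] usage in the examples below; whether 'batch_size' is retrievable this way is not verified here.

# hypothetical check of the other_args override; dict-style access is assumed
from trafficdl.config import ConfigParser

config = ConfigParser(task='traffic_state_pred', model='TemplateTSP',
                      dataset='metr_la', config_file=None,
                      other_args={'batch_size': 2})
print(config['batch_size'])  # expected to reflect the override: 2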
Example #3
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
# config = ConfigParser(task='traj_loc_pred', model='TemplateTLP',
#                      dataset='foursquare_tky', config_file=None,
#                      other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
config = ConfigParser(task='traffic_state_pred',
                      model='DGFN',
                      dataset='metr_la',
                      config_file=None,
                      other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
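
The executor created above is not used in this example; a minimal continuation, mirroring the train/evaluate calls from Example #1, would look like this.

# hypothetical continuation: train and evaluate with the executor, as in Example #1
executor.train(train_data, valid_data)
executor.evaluate(test_data)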
Example #4
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
config = ConfigParser(task='traj_loc_pred', model='TemplateTLP',
                      dataset='foursquare_tky', config_file=None,
                      other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
# config = ConfigParser(task='traffic_state_pred', model='TemplateTSP',
#       dataset='metr_la', config_file=None, other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
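
A small sketch of the shape check suggested by the comment above. It assumes res is a torch tensor and that the batch exposes a 'target' field, as it does in Examples #5 and #6; both assumptions are illustrative.

# hypothetical sanity check of the prediction shape against the target
print('prediction shape:', res.shape)
print('target shape:', batch['target'].shape)  # 'target' key assumed from Examples #5 and #6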
Example #5
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model

config = ConfigParser('traj_loc_pred', 'LSTPM', 'foursquare_tky', None,
                      {"history_type": 'cut_off'})
dataset = get_dataset(config)
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
batch = valid_data.__iter__().__next__()
model = get_model(config, data_feature)
self = model.to(config['device'])
batch.to_tensor(config['device'])
# batch['current_loc'] = torch.load('current_loc.pt')
# batch['current_tim'] = torch.load('current_tim.pt')
# batch['history_loc'] = torch.load('history_loc.pt')
# batch['history_tim'] = torch.load('history_tim.pt')
# batch['uid'] = torch.load('uid.pt')
# batch['target'] = torch.load('target.pt')
# self.load_state_dict(torch.load('model_state.m'))
logp_seq = self.forward(batch)
# executor = get_executor(config, model)
'''
batch_size = batch['current_loc'].shape[0]
pad_loc = torch.LongTensor([batch.pad_item['current_loc']] * batch_size).unsqueeze(1).to(self.device)
pad_tim = torch.LongTensor([batch.pad_item['current_tim']] * batch_size).unsqueeze(1).to(self.device)
expand_current_loc = torch.cat([batch['current_loc'], pad_loc], dim=1)
expand_current_tim = torch.cat([batch['current_tim'], pad_tim], dim=1)
origin_len = batch.get_origin_len('current_loc').copy()
for i in range(batch_size):
    origin_len[i] += 1
Example #6
import os
import torch
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_executor, get_model
from trafficdl.utils.dataset import parseCoordinate
from geopy import distance
import numpy as np
import torch.nn.functional as F

config = ConfigParser('traj_loc_pred', 'STRNN', 'foursquare_tky', None, None)
dataset = get_dataset(config)
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# batch = valid_data.__iter__().__next__()
model = get_model(config, data_feature)
self = model.to(config['device'])
# batch.to_tensor(config['device'])
'''
user = batch['uid']
dst = batch['target'].tolist()
dst_time = batch['target_tim']
current_loc = batch['current_loc']
current_tim = batch['current_tim']
# compute td and ld
batch_size = len(dst)
td = dst_time.unsqueeze(1) - current_tim
ld = torch.zeros(current_loc.shape).to(self.device)
loc_len = batch.get_origin_len('current_loc')
current_loc = current_loc.tolist()
for i in range(batch_size):
Example #7
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
# config = ConfigParser(task='traj_loc_pred', model='TemplateTLP',
#                       dataset='foursquare_tky', config_file=None,
#                       other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
config = ConfigParser(task='traffic_state_pred',
                      model='ResLSTM',
                      dataset='BEIJING SUBWAY_10MIN',
                      config_file=None,
                      other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
Example #8
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
# config = ConfigParser(task='traj_loc_pred', model='TemplateTLP',
#                       dataset='foursquare_tky', config_file=None,
#                       other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
config = ConfigParser(task='traffic_state_pred',
                      model='DMVST',
                      dataset='NYCTaxi20160102',
                      config_file=None,
                      other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
Example #9
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
# config = ConfigParser(task='traj_loc_pred', model='TemplateTLP',
#                       dataset='foursquare_tky', config_file=None,
#                       other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
config = ConfigParser(task='traffic_state_pred',
                      model='CRANN',
                      dataset='M_DENSE',
                      config_file=None,
                      other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
Example #10
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
config = ConfigParser(task='traj_loc_pred',
                      model='FlashbackTrainer',
                      dataset='foursquare_tky',
                      config_file=None,
                      other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
# config = ConfigParser(task='traffic_state_pred', model='TemplateTSP',
#       dataset='metr_la', config_file=None, other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
Example #11
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
config = ConfigParser(task='traffic_state_pred',
                      model='MRA_BGCN',
                      dataset='PEMS_BAY',
                      config_file=None,
                      other_args={'batch_size': 2})
# for traffic flow/speed prediction tasks, use the config-loading statement below
# config = ConfigParser(task='traffic_state_pred', model='TemplateTSP',
#       dataset='metr_la', config_file=None, other_args={'batch_size': 2})
# load the data module
dataset = get_dataset(config)
# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
# model prediction
batch.to_tensor(config['device'])
res = model.predict(batch)
# please check for yourself that the shape of res satisfies the track's constraints
# if you want to load the executor as well
executor = get_executor(config, model)
Example #12
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset

task = 'traj_loc_pred'
model = 'DeepMove'
dataset = 'foursquare_tky'

other_args = {
    'batch_size': 1
}
config = ConfigParser(task, model, dataset, config_file=None, other_args=other_args)
dataset = get_dataset(config)
train_data, valid_data, test_data = dataset.get_data()
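
This example stops after splitting the data. A hedged sketch of how it could continue, following the pattern of the other examples on this page; the extra imports from trafficdl.utils are assumed to be available as shown elsewhere.

# hypothetical continuation following the other examples on this page
from trafficdl.utils import get_model, get_executor

data_feature = dataset.get_data_feature()
batch = train_data.__iter__().__next__()
model = get_model(config, data_feature)
model = model.to(config['device'])
batch.to_tensor(config['device'])
res = model.predict(batch)
executor = get_executor(config, model)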
Example #13
from trafficdl.config import ConfigParser
from trafficdl.data import get_dataset
from trafficdl.utils import get_model, get_executor

# load the config file
config = ConfigParser(task='traj_loc_pred',
                      model='ATSTLSTM',
                      dataset='gowalla',
                      config_file=None,
                      other_args={"gpu": False})
# for traffic flow/speed prediction tasks, use the config-loading statement below
# config = ConfigParser(task='traffic_state_pred', model='TemplateTSP',
#       dataset='metr_la', config_file=None, other_args={'batch_size': 2})

# load the data module
dataset = get_dataset(config)
print('*** get_dataset OK ***')

# preprocess the data and split the dataset
train_data, valid_data, test_data = dataset.get_data()
data_feature = dataset.get_data_feature()
print('*** data_preprocessing OK ***')

# fetch one batch of data to test the model
batch = train_data.__iter__().__next__()
print('*** get_batch OK ***')

# load the model
model = get_model(config, data_feature)
self = model.to(config['device'])
print('*** get_model OK ***')
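
The example ends after loading the model. A hedged continuation mirroring the prediction step used in the other examples; whether ATSTLSTM's predict accepts this batch unchanged is not verified here.

# hypothetical continuation mirroring the prediction step in the other examples
batch.to_tensor(config['device'])
res = model.predict(batch)
print('*** predict OK ***')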