Code example #1
def __init__(self):
    # Load hyperparameters, configure CUDA, and prepare the model data.
    self.model_parameters = get_parameters()
    self.define_cuda()
    self.set_model_data()
Code example #2
# -*- coding: utf-8 -*-
import torch
import torch.optim as optim

from common.globals_args import root, fn_graph_file
from grounding.ranking.path_match_nn.sequence_loader import SeqRankingLoader
from grounding.ranking.path_match_nn.model import PathRanking
from grounding.ranking.path_match_nn.parameters import get_parameters

model_parameters = get_parameters()

# Force CPU training; set this to True to train on the GPU instead.
model_parameters.cuda = False
torch.manual_seed(model_parameters.seed)
if not model_parameters.cuda:
    model_parameters.gpu = -1
if torch.cuda.is_available() and model_parameters.cuda:
    print("Note: You are using GPU for training")
    torch.cuda.set_device(model_parameters.gpu)
    torch.cuda.manual_seed(model_parameters.seed)
if torch.cuda.is_available() and not model_parameters.cuda:
    print("Warning: You have Cuda but do not use it. You are using CPU for training")

def train(train_file, val_file, model_file):
    # Build the path-ranking model and the sequence loaders for the training and validation sets.
    model = PathRanking(model_parameters=model_parameters)
    train_loader = SeqRankingLoader(train_file, model_parameters, model_parameters.gpu)
    val_loader = SeqRankingLoader(val_file, model_parameters, model_parameters.gpu)
    if model_parameters.cuda:
        model.cuda()
        print("Shift model to GPU")
    # Log each parameter's name and shape before training starts.
    for name, param in model.named_parameters():
        print(name, param.size())
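The listing above stops before the optimization step. The following is a minimal sketch of how train() might continue, not the repository's actual implementation: the hyperparameter names lr and epochs, the loader iteration style, the forward signature returning positive/negative scores, and the margin ranking loss are all assumptions made for illustration.

# Hypothetical continuation of train(); interfaces below are assumed, not taken from the source.
optimizer = optim.Adam(model.parameters(), lr=model_parameters.lr)      # assumed: lr hyperparameter
criterion = torch.nn.MarginRankingLoss(margin=1.0)                      # common choice for ranking models

model.train()
for epoch in range(model_parameters.epochs):                            # assumed: epochs hyperparameter
    total_loss = 0.0
    for batch in train_loader:                                          # assumed: loader is iterable over batches
        optimizer.zero_grad()
        pos_score, neg_score = model(batch)                             # assumed forward signature
        target = torch.ones_like(pos_score)                             # positive path should rank above negative
        loss = criterion(pos_score, neg_score, target)
        loss.backward()
        optimizer.step()
        total_loss += loss.item()
    print("epoch %d, loss %.4f" % (epoch, total_loss))
torch.save(model.state_dict(), model_file)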