# ----- Example 1 (示例#1) -----
# 0
import torch
from torch.optim import lr_scheduler
import torch.optim as optim
import torch.nn as nn
import numpy as np
import logging
import sys
sys.path.append("../")

from _utils import getLogger

# Module-level logger for this file (getLogger is a project helper from _utils;
# presumably returns a standard logging.Logger — confirm in _utils).
logger = getLogger(__name__)


class MultipletTrainer:
    """Trainer bundle holding data loaders, a model, and loss/similarity functions.

    The model is wrapped in ``nn.DataParallel`` when more than one CUDA
    device is available; otherwise it is stored as-is.
    """

    def __init__(self, train_loader, valid_loader, test_loader, model, loss_fn,
                 sim_fn, device):
        # Data loaders for each split. NOTE: the validation loader is
        # stored as ``val_loader`` even though the parameter is named
        # ``valid_loader``.
        self.train_loader = train_loader
        self.val_loader = valid_loader
        self.test_loader = test_loader

        # Spread the model across GPUs only when several are present.
        multi_gpu = torch.cuda.device_count() > 1
        self.model = nn.DataParallel(model) if multi_gpu else model

        self.loss_fn = loss_fn  # loss function
        self.sim_fn = sim_fn  # similarity function

        self.device = device
# ----- Example 2 (示例#2) -----
# 0
import numpy as np
import matplotlib.pyplot as plt
from time import time
import os
import re
import logging

from datasets.getDataLoader import getDataLoader
from models.getModel import getModel
from distances.getDistance import getClusterDistance
from predictors.Predictor import Predictor

from _utils import getLogger, setLogLevel, setLogFile

# Raise the PIL logger's threshold to INFO so PIL's DEBUG chatter does not
# flood the logs (assumes getLogger — a project helper from _utils — returns
# a standard logging.Logger; verify against _utils).
pil_logger = getLogger('PIL')
pil_logger.setLevel(logging.INFO)

# Module-level logger for this file.
logger = getLogger(__name__)


def main(cfg_known_valid_path,
         cfg_known_inf_path,
         cfg_unknown_inf_path,
         weight_path,
         cluster_distance_name,
         thread,
         save=True):
    #load data
    pattern = ".*out(\d+).*"
    match = re.match(pattern, weight_path)
    num_out = int(match.group(1))