def __init__(self, arch, model, lr=1e-5, batch_size=64):
    self.arch = arch
    # create the checkpoint directory for this architecture if it does not exist
    if not os.path.exists(f'{arch}/ckpt/'):
        os.makedirs(f'{arch}/ckpt/')
    self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    self.batch_size = batch_size
    self.model = model
    self.model.to(self.device)
    self.criterion = nn.MSELoss()
    self.optimizer = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=1e-4)
    self.scheduler = StepLR(self.optimizer, step_size=100, gamma=0.5)
    self.history = {'loss': []}
    same_seeds(0)
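# A minimal sketch (not the original implementation) of how the attributes set up
# above might be used in a training method. The `train` method name, the `n_epoch`
# argument, and the assumption that the model is an autoencoder returning its
# reconstruction are illustrative assumptions.
def train(self, dataloader, n_epoch):
    self.model.train()
    for epoch in range(n_epoch):
        epoch_loss = 0.0
        for x in dataloader:
            x = x.to(self.device)
            recon = self.model(x)
            loss = self.criterion(recon, x)   # reconstruction error
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            epoch_loss += loss.item()
        self.scheduler.step()                 # decay LR every `step_size` epochs
        self.history['loss'].append(epoch_loss / len(dataloader))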
def __init__(self, arch, model, lr, batch_size, wd):
    self.arch = arch
    if not os.path.exists(f'{arch}/ckpt/'):
        os.makedirs(f'{arch}/ckpt/')
    self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    self.batch_size = batch_size
    self.model = model
    self.model.to(self.device)
    self.criterion = nn.MSELoss()
    # apply the weight-decay argument `wd` in the optimizer
    self.opt = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=wd)
    self.scheduler = StepLR(self.opt, step_size=50, gamma=0.1)
    self.history = {'train_loss': [], 'valid_loss': []}
    self.min_loss = math.inf
    same_seeds(73)
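# A minimal sketch, under the assumption that `min_loss` tracks the best model:
# an illustrative validation pass that saves a checkpoint whenever the validation
# loss improves. The `valid` method name and the checkpoint file name are
# assumptions, not part of the original code.
def valid(self, dataloader):
    self.model.eval()
    total_loss = 0.0
    with torch.no_grad():
        for x in dataloader:
            x = x.to(self.device)
            recon = self.model(x)
            total_loss += self.criterion(recon, x).item()
    valid_loss = total_loss / len(dataloader)
    self.history['valid_loss'].append(valid_loss)
    if valid_loss < self.min_loss:            # keep the best model seen so far
        self.min_loss = valid_loss
        torch.save(self.model.state_dict(), f'{self.arch}/ckpt/best.pth')
    return valid_loss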
def __init__(self, arch, model, batch_size, lr, accum_steps, device):
    self.arch = arch
    if not os.path.exists(arch):
        os.makedirs(arch)
    self.model = model
    self.batch_size = batch_size
    self.opt = torch.optim.Adam(self.model.parameters(), lr=lr, weight_decay=1e-2)
    self.scheduler = StepLR(self.opt, step_size=20, gamma=0.5)
    self.accum_steps = accum_steps  # number of mini-batches to accumulate gradients over
    self.criteria = torch.nn.CrossEntropyLoss()
    self.device = device
    self.history = {'train': [], 'valid': []}
    self.best_score = math.inf
    same_seeds(73)
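# A minimal sketch of how `accum_steps` could drive gradient accumulation: each
# mini-batch loss is scaled by 1/accum_steps and the optimizer steps only every
# `accum_steps` batches, emulating a larger effective batch size. The
# `train_epoch` name and the (x, y) batch format are illustrative assumptions,
# and the model is assumed to already be on `self.device`.
def train_epoch(self, dataloader):
    self.model.train()
    self.opt.zero_grad()
    running_loss = 0.0
    for step, (x, y) in enumerate(dataloader):
        x, y = x.to(self.device), y.to(self.device)
        logits = self.model(x)
        loss = self.criteria(logits, y) / self.accum_steps
        loss.backward()                        # gradients accumulate across batches
        if (step + 1) % self.accum_steps == 0:
            self.opt.step()
            self.opt.zero_grad()
        running_loss += loss.item() * self.accum_steps
    self.scheduler.step()
    self.history['train'].append(running_loss / len(dataloader))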
def __init__(self, arch, device):
    self.arch = arch
    if not os.path.exists(f'{arch}/ckpts'):
        os.makedirs(f'{arch}/ckpts')
    # two separate modules, each with its own optimizer
    self.feature_extractor = FeatureExtractor().to(device)
    self.label_predictor = LabelPredictor().to(device)
    self.optimizer_F = optim.Adam(self.feature_extractor.parameters(), lr=1e-4)
    self.optimizer_C = optim.Adam(self.label_predictor.parameters(), lr=1e-4)
    self.class_criterion = nn.CrossEntropyLoss()
    self.device = device
    self.history = {'loss': [], 'acc': []}
    self.best_score = math.inf
    same_seeds(73)
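# A minimal sketch of a supervised step for the two-module setup above: features
# are extracted, classified, and both optimizers step on the same cross-entropy
# loss. The `train_step` name and the (imgs, labels) batch format are assumptions
# made for illustration.
def train_step(self, imgs, labels):
    imgs, labels = imgs.to(self.device), labels.to(self.device)
    features = self.feature_extractor(imgs)
    logits = self.label_predictor(features)
    loss = self.class_criterion(logits, labels)
    self.optimizer_F.zero_grad()
    self.optimizer_C.zero_grad()
    loss.backward()
    self.optimizer_F.step()
    self.optimizer_C.step()
    acc = (logits.argmax(dim=-1) == labels).float().mean().item()
    return loss.item(), acc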
import os
import sys

import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader

from utils import same_seeds
from dataset_baseline import Image_Dataset, preprocess
from model_baseline import AE

same_seeds(0)

input_filename = sys.argv[1]    # ~/Downloads/dataset/trainX.npy
# output_modeldir = sys.argv[2]  # ./model
output_filename = sys.argv[2]   # ./chekpoints/baseline.pth

# dataset
trainX = np.load(input_filename)
print("trainX", trainX.shape)
trainX_preprocessed = preprocess(trainX)
print("trainX_preprocessed", trainX_preprocessed.shape)
img_dataset = Image_Dataset(trainX_preprocessed)
img_dataloader = DataLoader(img_dataset, batch_size=64, shuffle=True)

# model, loss, optimizer
model = AE().cuda()
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
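# The script above ends at the optimizer definition; what follows is a minimal,
# hypothetical continuation showing how the pieces are typically wired together.
# The epoch count and the assumption that AE.forward returns the reconstruction
# are illustrative; the final weights are saved to `output_filename`.
n_epoch = 100  # assumed value
model.train()
for epoch in range(n_epoch):
    epoch_loss = 0.0
    for img in img_dataloader:
        img = img.cuda()
        recon = model(img)
        loss = criterion(recon, img)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print(f'epoch {epoch + 1}/{n_epoch}, loss: {epoch_loss / len(img_dataloader):.5f}')

torch.save(model.state_dict(), output_filename)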
TAR_INPUT_DIMENSION = args.tar_input_dim
N_DIMENSION = args.n_dim
CLASS_NUM = args.class_num
SHOT_NUM_PER_CLASS = args.shot_num_per_class
QUERY_NUM_PER_CLASS = args.query_num_per_class
EPISODE = args.episode
TEST_EPISODE = args.test_episode
LEARNING_RATE = args.learning_rate
GPU = args.gpu
HIDDEN_UNIT = args.hidden_unit

# Hyper Parameters in target domain data set
TEST_CLASS_NUM = args.test_class_num  # the number of classes
TEST_LSAMPLE_NUM_PER_CLASS = args.test_lsample_num_per_class  # the number of labeled samples per class (5, 4, 3, 2, 1)

utils.same_seeds(0)

def _init_():
    if not os.path.exists('checkpoints'):
        os.makedirs('checkpoints')
    if not os.path.exists('classificationMap'):
        os.makedirs('classificationMap')

_init_()

# load source domain data set
with open(os.path.join('datasets', 'Chikusei_imdb_128.pickle'), 'rb') as handle:
    source_imdb = pickle.load(handle)
print(source_imdb.keys())
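# The hyper-parameter block above reads from an `args` namespace; a plausible
# argparse definition that would produce it is sketched below. Flag names are
# inferred from the attribute names used above, and no default values are
# claimed, since the originals are not shown in this snippet.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--tar_input_dim', type=int)
parser.add_argument('--n_dim', type=int)
parser.add_argument('--class_num', type=int)
parser.add_argument('--shot_num_per_class', type=int)
parser.add_argument('--query_num_per_class', type=int)
parser.add_argument('--episode', type=int)
parser.add_argument('--test_episode', type=int)
parser.add_argument('--learning_rate', type=float)
parser.add_argument('--gpu', type=int)
parser.add_argument('--hidden_unit', type=int)
parser.add_argument('--test_class_num', type=int)
parser.add_argument('--test_lsample_num_per_class', type=int)
args = parser.parse_args()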