import sys
sys.path.insert(0, '/Users/utxeee/Desktop/deep-learning-com-perfis/code/pyTorch/utils')

import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader

import data_preprocessor as dp
import data_splitting as ds
import utils

# -----------------------------------------------------------------------------
# Force reloading of custom modules; otherwise, changes made to them after
# loading would not be taken into account here!
# -----------------------------------------------------------------------------
utils.reload_modules([dp, ds, utils])


# -----------------------------------------------------------------------------
# Class: TrainDataset
#
# Description: Dataset for the training matrix (meant to be fed to a DataLoader)
# -----------------------------------------------------------------------------
class TrainDataset(Dataset):

    def __init__(self):
        # ratings_matrix = dp.build_preprocessed_ratings_matrix()
        train_matrix = ds.get_train_matrix()
        self.len = train_matrix.shape[0]
        self.train_matrix = torch.from_numpy(train_matrix)
        # By default the train matrix tensor is a double after converting it
        # from numpy, but the model expects a float, hence we cast the type
        # here!
        self.train_matrix = self.train_matrix.float()
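# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): assuming the
# class also defines __getitem__ and __len__ further down, as PyTorch's Dataset
# requires, it can be consumed through a DataLoader as below. The batch size
# and shuffling are arbitrary choices for the example.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    train_dataset = TrainDataset()
    train_loader = DataLoader(dataset=train_dataset, batch_size=16, shuffle=True)
    for batch in train_loader:
        # each batch is a float tensor of shape (batch_size, number_of_columns)
        print(batch.shape)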
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# <pep8 compliant>

from utils import Settings, get_sources_path, reload_modules, print_title, isolated_print

settings = Settings(get_sources_path())

# reload modules if the option is enabled in the optimization_tools.ini file
reload_modules(settings)

import os
from pathlib import Path

from constants import *
from utils import check_configuration, ScriptError, build_package, pr_bg_green, pr_bg_red
from msfs_project import MsfsProject


def merge_sceneries(script_settings):
    try:
        # instantiate the MsfsProject and create the necessary resources if they do not exist
        msfs_project = MsfsProject(script_settings.projects_path, script_settings.project_name, script_settings.definition_file,
import sys
sys.path.insert(0, 'auto-rec')
sys.path.insert(0, 'utils')

import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim as optim  # optimization package

import auto_rec_model as arm
import utils

# def init():
utils.reload_modules([utils, arm])

INPUT_SIZE = 5
HIDDEN_SIZE = 3

autorec = arm.AutoRecModel(INPUT_SIZE, HIDDEN_SIZE)
# utils.print_parameters(autorec)

input = torch.tensor([1., -1., 3., -1., 5.], requires_grad=False)

loss_function = nn.MSELoss()
optimizer = optim.SGD(autorec.parameters(), lr=1e-1)


def add_grad_hooks(encoder_hook, decoder_hook):
    for name, param in autorec.named_parameters():
        if name == "encoder.weight":
            encoder_handle = param.register_hook(encoder_hook)
        if name == "decoder.weight":
            decoder_handle = param.register_hook(decoder_hook)
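# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): register simple
# printing hooks and trigger them with one forward/backward pass, assuming
# AutoRecModel.forward accepts a 1-D tensor of size INPUT_SIZE. The hooks fire
# when the gradients of encoder.weight / decoder.weight are computed.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    add_grad_hooks(
        lambda grad: print("encoder grad norm:", grad.norm().item()),
        lambda grad: print("decoder grad norm:", grad.norm().item()))

    optimizer.zero_grad()
    output = autorec(input)
    loss = loss_function(output, input)
    loss.backward()   # the registered hooks fire here
    optimizer.step()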
import sys
sys.path.insert(0, '/Users/utxeee/Desktop/deep-learning-com-perfis/code/pyTorch/utils')

import numpy as np
import raw_data_loader as rdl
# import data_splitting as ds
import utils

from sklearn import preprocessing
from sklearn.preprocessing import StandardScaler

# -----------------------------------------------------------------------------
# Force reloading of custom modules; otherwise, changes made to them after
# loading would not be taken into account here!
# -----------------------------------------------------------------------------
utils.reload_modules([rdl, utils])


# -----------------------------------------------------------------------------
# Scale ratings to the range [lower_bound, upper_bound] (via third-party libraries)
#
# Remark: Typically, we scale ratings to the [1, 10] range because we want to
#         get rid of 0-star ratings, as that value is reserved for missing
#         ratings; besides, a [1, 10] range is more meaningful than [0, 100].
# -----------------------------------------------------------------------------
def scale_ratings(lower_bound, upper_bound, ratings_matrix):
    min_max_scaler = preprocessing.MinMaxScaler(feature_range=(lower_bound, upper_bound))
    reshaped_ratings_matrix = np.array(ratings_matrix).reshape(-1, 1)
    # return zip(reshaped_ratings, min_max_scaler.fit_transform(reshaped_ratings))
    return min_max_scaler.fit_transform(reshaped_ratings_matrix).reshape(ratings_matrix.shape)
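# -----------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the original module): scale a tiny
# made-up ratings matrix to the [1, 10] range. Because the matrix is flattened
# to a single column before scaling, the global minimum (0) maps to 1 and the
# global maximum (100) maps to 10.
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    ratings = np.array([[0, 50, 100],
                        [25, 75, 0]])
    print(scale_ratings(1, 10, ratings))
    # [[ 1.    5.5  10.  ]
    #  [ 3.25  7.75  1.  ]]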
import sys
sys.path.insert(0, '/Users/utxeee/Desktop/deep-learning-com-perfis/code/pyTorch/data-manipulation')

import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F

import auto_rec_model as arm
import auto_rec_loss_function as arl
import data_loader as dl
import utils
import math

# -----------------------------------------------------------------------------
# Force reloading of custom modules; otherwise, changes made to them after
# loading would not be taken into account here!
# -----------------------------------------------------------------------------
utils.reload_modules([arm, dl, arl, utils])

# -----------------------------------------------------------------------------
# Hyperparameters
# -----------------------------------------------------------------------------
D_in = D_out = 71        # number of activities
H = 20                   # hidden layer size
NUM_EPOCHS = 1
LEARNING_RATE = 1e-2
MOMENTUM = 0.9
WEIGHT_DECAY = 0         # 0.5

# -----------------------------------------------------------------------------
# Model initialization
# -----------------------------------------------------------------------------
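# -----------------------------------------------------------------------------
# Sketch (illustrative, not necessarily the original initialization code):
# assuming AutoRecModel takes (input_size, hidden_size) as in the other
# scripts, the model and optimizer would be built from the constants above
# roughly as follows.
# -----------------------------------------------------------------------------
autorec = arm.AutoRecModel(D_in, H)
optimizer = optim.SGD(autorec.parameters(), lr=LEARNING_RATE,
                      momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)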