from os import path

from termcolor import colored
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader

# Project-local imports; the module paths below are assumed from this
# repo's layout and may need adjusting.
import global_vars as Global
from datasets import MirroredDataset
from utils.iterative_trainer import IterativeTrainerConfig
from utils.logger import Logger


def get_ae_config(args, model, dataset, BCE_Loss):
    print("Preparing training D1 for %s" % (dataset.name))

    # 80%/20% split for local train/test.
    train_ds, valid_ds = dataset.split_dataset(0.8)

    if dataset.name in Global.mirror_augment:
        print(colored("Mirror augmenting %s" % dataset.name, 'green'))
        train_ds = train_ds + MirroredDataset(train_ds)

    # Initialize the multi-threaded loaders.
    train_loader = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True,
                              num_workers=args.workers, pin_memory=True)
    valid_loader = DataLoader(valid_ds, batch_size=args.batch_size,
                              num_workers=args.workers, pin_memory=True)
    all_loader = DataLoader(dataset, batch_size=args.batch_size,
                            num_workers=args.workers, pin_memory=True)

    # Set up the model.
    model = model.to(args.device)

    # Set up the criterion. With BCEWithLogitsLoss the sigmoid is folded
    # into the loss; with MSE the model applies a sigmoid to its output
    # by default instead.
    if BCE_Loss:
        criterion = nn.BCEWithLogitsLoss().to(args.device)
    else:
        criterion = nn.MSELoss().to(args.device)
        model.default_sigmoid = True

    # Set up the config.
    config = IterativeTrainerConfig()
    config.name = 'autoencoder_%s_%s' % (dataset.name, model.preferred_name())
    config.train_loader = train_loader
    config.valid_loader = valid_loader
    config.phases = {
        'train': {'dataset': train_loader, 'backward': True},
        'test':  {'dataset': valid_loader, 'backward': False},
        'all':   {'dataset': all_loader,   'backward': False},
    }
    config.criterion = criterion
    config.classification = False
    config.cast_float_label = False
    config.autoencoder_target = True
    config.stochastic_gradient = True
    config.visualize = not args.no_visualize
    config.sigmoid_viz = BCE_Loss
    config.model = model
    config.logger = Logger()

    config.optim = optim.Adam(model.parameters(), lr=1e-3)
    config.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
        config.optim, patience=10, threshold=1e-3, min_lr=1e-6,
        factor=0.1, verbose=True)
    config.max_epoch = 120

    # Let the model override any of the defaults above.
    if hasattr(model, 'train_config'):
        model_train_config = model.train_config()
        for key, value in model_train_config.items():  # was iteritems (Python 2)
            print('Overriding config.%s' % key)
            setattr(config, key, value)

    return config
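# --- Usage sketch (illustrative only) ---
# A minimal sketch of how get_ae_config might be driven, assuming an
# IterativeTrainer-style runner with a run_epoch(epoch, phase=...) method
# and a logger that tracks per-phase losses; the runner API and the
# 'test_loss' measure name are assumptions, not confirmed by this snippet.
#
#   config = get_ae_config(args, model, dataset, BCE_Loss=True)
#   trainer = IterativeTrainer(config, args)
#   for epoch in range(config.max_epoch):
#       trainer.run_epoch(epoch, phase='train')
#       trainer.run_epoch(epoch, phase='test')
#       test_loss = config.logger.get_measure('test_loss').mean_epoch()
#       config.scheduler.step(test_loss)  # ReduceLROnPlateau needs the metric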
def get_base_config(self, dataset):
    print("Preparing training D1 for %s" % (dataset.name))

    # Initialize the multi-threaded loader.
    all_loader = DataLoader(dataset, batch_size=self.args.batch_size,
                            num_workers=self.args.workers, pin_memory=True)

    # Set up the model.
    model = Global.get_ref_ali(dataset.name)[0]().to(self.args.device)

    # Set up the criterion, mirroring get_ae_config: model 0 trains against
    # logits with BCE; otherwise MSE with a default sigmoid on the output.
    if self.default_model == 0:
        criterion = nn.BCEWithLogitsLoss().to(self.args.device)
    else:
        criterion = nn.MSELoss().to(self.args.device)
        model.default_sigmoid = True

    # Set up the config: a single evaluation-only phase with no optimizer.
    config = IterativeTrainerConfig()
    config.name = '%s-ALIAE1' % (self.args.D1)
    config.phases = {
        'all': {'dataset': all_loader, 'backward': False},
    }
    config.criterion = criterion
    config.classification = False
    config.cast_float_label = False
    config.autoencoder_target = True
    config.stochastic_gradient = True
    config.visualize = not self.args.no_visualize
    config.sigmoid_viz = self.default_model == 0
    config.model = model
    config.optim = None

    # Log in the directory that holds this method's cached snapshot
    # for the current D1/D2 pair.
    h_path = path.join(self.args.experiment_path,
                       '%s' % (self.__class__.__name__),
                       '%d' % (self.default_model),
                       '%s-%s.pth' % (self.args.D1, self.args.D2))
    h_parent = path.dirname(h_path)
    config.logger = Logger(h_parent)

    return config
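# --- Usage sketch (illustrative only) ---
# get_base_config deliberately builds an evaluation-only config: the single
# 'all' phase has backward=False and config.optim is None, so running it
# only computes reconstruction losses over the whole dataset (no parameter
# updates). Assuming the same IterativeTrainer runner as in the sketch
# above, with an 'all_loss' measure name (both assumptions):
#
#   config = self.get_base_config(dataset)
#   trainer = IterativeTrainer(config, self.args)
#   trainer.run_epoch(0, phase='all')   # forward passes only
#   mean_loss = config.logger.get_measure('all_loss').mean_epoch(epoch=0)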