def train_autoencoder_deep_fashion():
    """Trains the autoencoder for DeepFashion."""
    print("=============================================================")
    print("================ Train AE with DeepFashion ==================")
    print("=============================================================\n")

    encoder = CifarNet(input_channels=3, num_classes=50)
    encoder = encoder.to(DEVICE)

    decoder = Decoder(input_channels=64, num_classes=50, out_channels=3)
    decoder = decoder.to(DEVICE)

    parameters = list(encoder.parameters()) + list(decoder.parameters())
    loss_fn = nn.MSELoss()

    # Observe that all parameters are being optimized
    optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return train(encoder, decoder, loss_fn, optimizer, scheduler, EPOCHS,
                 train_loader_deep_fashion)
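# `train` is defined elsewhere in the repo. The sketch below is a minimal,
# hypothetical version of its autoencoder variant, reconstructed only from the
# call site above (encoder, decoder, loss_fn, optimizer, scheduler, epochs,
# loader); the real implementation may add logging, checkpointing, or a
# different return value.
def train_autoencoder_sketch(encoder, decoder, loss_fn, optimizer, scheduler,
                             epochs, loader):
    encoder.train()
    decoder.train()
    epoch_losses = []
    for epoch in range(epochs):
        running_loss = 0.0
        for images, _ in loader:  # labels are unused for reconstruction
            images = images.to(DEVICE)
            optimizer.zero_grad()
            # Encode to a latent representation, then decode back to pixels
            latent = encoder(images)
            reconstruction = decoder(latent)
            loss = loss_fn(reconstruction, images)  # pixel-wise MSE
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        scheduler.step()
        epoch_losses.append(running_loss / len(loader))
    return encoder, decoder, epoch_losses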
class TrainTask(Task):
    def __init__(self, task_id, sample_dir: str, aggregate_dir: str = None):
        super(TrainTask, self).__init__(task_id=str(task_id))
        self.sample_dir = sample_dir
        self.aggregate_dir = aggregate_dir

    def load(self) -> None:
        self.model = CifarNet()
        self.optimizer = SGD(self.model.parameters(), lr=0.001, momentum=0.9)
        self.lr_scheduler = MultiStepLR(self.optimizer, [10, 30, 60, 90])
        self.criterion = nn.CrossEntropyLoss()

        # Load dataset
        sample_path = os.path.join(self.sample_dir, "sample-%s.csv" % self.task_id)
        df = pd.read_csv(sample_path, header=None)
        data = df.values.tolist()
        self.dataset = CifarDataset(data)

    def train(self, device: str) -> dict:
        if self.aggregate_dir is not None:
            pre_model_path = os.path.join(self.aggregate_dir, "aggregate.pth")
        else:
            pre_model_path = None

        self.trainer = SupervisedTrainer(self.model, self.optimizer,
                                         self.criterion, self.lr_scheduler,
                                         epoch=10, device=device,
                                         init_model_path=pre_model_path,
                                         console_out="console.out")

        # Measure accuracy of the (possibly aggregated) initial model first
        _, correct, total = self.trainer.test(pre_model_path, self.dataset)
        ret = {
            "init_acc": "%.2f%%(%d/%d)" % (100 * correct / total, correct, total)
        }

        self.trainer.mount_dataset(self.dataset)
        ret.update(self.trainer.train())
        return ret
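# A minimal usage sketch for TrainTask. The directory layout ("data/samples"
# holding sample-0.csv, "data/aggregates" holding aggregate.pth) and the
# device string are assumptions for illustration, not the repo's actual
# driver code.
if __name__ == "__main__":
    task = TrainTask(task_id=0,
                     sample_dir="data/samples",
                     aggregate_dir="data/aggregates")
    task.load()                          # build model, optimizer, scheduler, dataset
    metrics = task.train(device="cuda")  # init accuracy plus trainer stats
    print(metrics["init_acc"])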
def train_exemplar_cnn_deep_fashion():
    """Trains the Exemplar-CNN model."""
    print("============================================================")
    print("============ Train ExemplarCNN with DeepFashion ============")
    print("============================================================\n")

    # Number of predicted classes = number of training images
    model = CifarNet(input_channels=3,
                     num_classes=len(train_loader_deep_fashion.dataset))
    model = model.to(DEVICE)

    loss_fn = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    parameters = model.parameters()
    optimizer = torch.optim.Adam(parameters, lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return train(model, loss_fn, optimizer, scheduler, EPOCHS,
                 train_loader_deep_fashion)
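# Why num_classes == len(dataset): in the Exemplar-CNN pretext task every
# training image defines its own surrogate class, and the network learns to
# classify augmented views back to their source image. A minimal sketch of
# such a dataset wrapper (hypothetical; the repo's actual augmentation
# pipeline and dataset class may differ):
class ExemplarDatasetSketch(torch.utils.data.Dataset):
    def __init__(self, base_dataset, transform):
        self.base_dataset = base_dataset
        self.transform = transform  # heavy augmentation: crop, jitter, flip

    def __len__(self):
        return len(self.base_dataset)

    def __getitem__(self, index):
        image, _ = self.base_dataset[index]   # original label is discarded
        return self.transform(image), index   # surrogate label = image index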
def train_rotation_net_deep_fashion():
    """Trains the rotation model."""
    print("============================================================")
    print("========== Train Rotation Model with DeepFashion ===========")
    print("============================================================\n")

    model = CifarNet(input_channels=3, num_classes=4)
    model = model.to(DEVICE)

    loss_fn = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer = torch.optim.Adam(model.parameters(), lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return train(model, loss_fn, optimizer, scheduler, EPOCHS,
                 train_loader_deep_fashion, val_loader_deep_fashion)
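# Why num_classes == 4: the rotation pretext task predicts which of four
# rotations (0, 90, 180, 270 degrees) was applied to the input. A minimal
# sketch of the label generation (hypothetical helper; the repo may instead
# rotate inside its dataset or collate function):
def rotate_batch_sketch(images):
    """Expand a batch (N, C, H, W) into 4N rotated copies with labels 0-3."""
    rotated = torch.cat([torch.rot90(images, k, dims=(2, 3)) for k in range(4)])
    labels = torch.arange(4).repeat_interleave(images.size(0))
    return rotated, labels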
def train_supervised_deep_fashion():
    """Trains the supervised model."""
    print("============================================================")
    print("============= Supervised Training DeepFashion ==============")
    print("============================================================\n")

    df_supervised_model = CifarNet(input_channels=3, num_classes=50)
    df_supervised_model = df_supervised_model.to(DEVICE)

    loss_fn = nn.CrossEntropyLoss()

    # Observe that all parameters are being optimized
    optimizer = torch.optim.Adam(df_supervised_model.parameters(),
                                 lr=LEARNING_RATE_TRAIN)

    # Decay LR by a factor of GAMMA
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer=optimizer,
                                                step_size=STEP_SIZE_TRAIN,
                                                gamma=GAMMA)

    return fine_tune(df_supervised_model, loss_fn, optimizer, scheduler, EPOCHS,
                     train_loader_deep_fashion, val_loader_deep_fashion)
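# `fine_tune` is defined elsewhere in the repo. The sketch below is a
# hypothetical minimal version reconstructed from the call site above: a
# standard supervised loop with a per-epoch validation-accuracy check over
# the 50 DeepFashion categories.
def fine_tune_sketch(model, loss_fn, optimizer, scheduler, epochs,
                     train_loader, val_loader):
    for epoch in range(epochs):
        model.train()
        for images, labels in train_loader:
            images, labels = images.to(DEVICE), labels.to(DEVICE)
            optimizer.zero_grad()
            loss = loss_fn(model(images), labels)
            loss.backward()
            optimizer.step()
        scheduler.step()

        # Validation pass
        model.eval()
        correct = total = 0
        with torch.no_grad():
            for images, labels in val_loader:
                images, labels = images.to(DEVICE), labels.to(DEVICE)
                predictions = model(images).argmax(dim=1)
                correct += (predictions == labels).sum().item()
                total += labels.size(0)
        print("epoch %d: val acc %.2f%%" % (epoch, 100 * correct / total))
    return model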