def __init__(self, path_to_model):
    """Load trained NIMA weights from *path_to_model* and prepare the
    network for inference (evaluation mode, gradients unused)."""
    self.model = NIMA(pretrained_base_model=False)
    # map_location returns the storage unchanged, so tensors are loaded
    # onto CPU even if the checkpoint was saved from a GPU run.
    weights = torch.load(path_to_model, map_location=lambda storage, loc: storage)
    self.model.load_state_dict(weights)
    self.model.eval()
    self.transform = Transform().val_transform
class InferenceModel:
    """Wraps a trained NIMA network for single-image aesthetic scoring."""

    @classmethod
    def create_model(cls):
        """Alternate constructor: download the checkpoint named by the
        MODEL_URL / MODEL_PATH config entries, then build the model from it."""
        path_to_model = download_file(config('MODEL_URL'), config('MODEL_PATH'))
        return cls(path_to_model)

    def __init__(self, path_to_model):
        """Load weights from *path_to_model* (onto CPU) and switch to eval mode."""
        self.transform = Transform().val_transform
        self.model = NIMA(pretrained_base_model=False)
        # Load onto CPU regardless of the device the checkpoint was saved from.
        state_dict = torch.load(path_to_model, map_location=lambda storage, loc: storage)
        self.model.load_state_dict(state_dict)
        self.model.eval()

    def predict_from_file(self, image_path):
        """Score the image stored at *image_path*."""
        image = default_loader(image_path)
        return self.predict(image)

    def predict_from_pil_image(self, image):
        """Score an in-memory PIL image (converted to RGB first)."""
        image = image.convert('RGB')
        return self.predict(image)

    def predict(self, image):
        """Run the network on a (transformed) image and return the formatted
        mean score, score std-dev and raw probability distribution."""
        image = self.transform(image)
        image = image.unsqueeze_(0)  # add batch dimension
        # FIX: torch.autograd.Variable(..., volatile=True) is deprecated and the
        # volatile flag has been removed since PyTorch 0.4 — use no_grad instead,
        # which gives the same "no autograd bookkeeping" behavior at inference.
        with torch.no_grad():
            prob = self.model(image).data.numpy()[0]
        mean_score = get_mean_score(prob)
        std_score = get_std_score(prob)
        return format_output(mean_score, std_score, prob)
def start_check_model(params: ValidateParams):
    """Evaluate saved NIMA weights on the validation and test splits.

    Returns a ``(val_loss, test_loss)`` tuple of EMD losses.
    """
    loader_val, loader_test = _create_val_data_part(params)

    model = NIMA()
    model.load_state_dict(torch.load(params.path_to_model_weight))
    model = model.to(device)

    criterion = EDMLoss()
    criterion.to(device)

    loss_val = validate(model=model, loader=loader_val, criterion=criterion)
    loss_test = validate(model=model, loader=loader_test, criterion=criterion)
    return loss_val, loss_test
def get_optimizer(optimizer_type: str, model: "NIMA", init_lr: float) -> torch.optim.Optimizer:
    """Build the optimizer named by *optimizer_type* over *model*'s parameters.

    Args:
        optimizer_type: either ``"adam"`` or ``"sgd"``.
        model: network whose parameters will be optimized.
        init_lr: initial learning rate.

    Raises:
        ValueError: if *optimizer_type* is not a supported name.
    """
    if optimizer_type == "adam":
        return torch.optim.Adam(model.parameters(), lr=init_lr)
    if optimizer_type == "sgd":
        # NOTE(review): weight_decay=9 is an extreme L2 penalty — typical values
        # are around 1e-4; this looks like a typo (9e-4?). TODO confirm before use.
        return torch.optim.SGD(model.parameters(), lr=init_lr, momentum=0.5, weight_decay=9)
    # FIX: error message previously read "not such optimizer" (typo).
    raise ValueError(f"no such optimizer {optimizer_type}")
def start_check_model(params: ValidateParams):
    """Validate stored NIMA weights on both validation and test data.

    Returns ``(val_loss, test_loss)``.
    """
    loader_val, loader_test = _create_val_data_part(params)

    model = NIMA()
    model.load_state_dict(torch.load(params.path_to_model_weight))
    criterion = EDMLoss()

    if use_gpu:
        # Move to GPU and wrap for multi-GPU evaluation.
        model = torch.nn.DataParallel(model.cuda())
        criterion.cuda()

    loss_val = validate(model=model, loader=loader_val, criterion=criterion)
    loss_test = validate(model=model, loader=loader_test, criterion=criterion)
    return loss_val, loss_test
def start_check_model(params: ValidateParams):
    """Run a full check of pretrained weights: EMD loss on val and test splits.

    Returns ``(val_loss, test_loss)``.
    """
    print("Start checking model...")
    loader_val, loader_test = _create_val_data_part(params)

    net = NIMA(pretrained_base_model=False)
    net.load_state_dict(torch.load(params.path_to_model_weight))
    net = net.to(device)

    loss_fn = EDMLoss()
    loss_fn.to(device)

    loss_on_val = validate(model=net, loader=loader_val, criterion=loss_fn)
    loss_on_test = validate(model=net, loader=loader_test, criterion=loss_fn)
    print("Checking done. ")
    return loss_on_val, loss_on_test
def start_train(params: TrainParams, pretrained_model):
    """Train NIMA for ``params.num_epoch`` epochs, checkpointing every epoch.

    Args:
        params: training configuration (data paths, lr, epoch count,
            experiment directory).
        pretrained_model: passed straight through to ``NIMA(...)`` —
            presumably the pretrained backbone or a flag; confirm against
            NIMA's constructor.

    Side effects: creates ``params.experiment_dir_name`` with TensorBoard
    logs, a ``params.json`` dump, per-epoch ``.pth`` checkpoints and an
    ``all_scalars.json`` export.
    """
    train_loader, val_loader = _create_train_data_part(params=params)
    model = NIMA(pretrained_model)
    optimizer = torch.optim.Adam(model.parameters(), lr=params.init_lr)
    criterion = EDMLoss()
    model = model.to(device)
    criterion.to(device)
    # NOTE(review): the writer is created before makedirs below; this relies on
    # SummaryWriter creating its own log_dir — verify with the writer in use.
    writer = SummaryWriter(log_dir=os.path.join(params.experiment_dir_name, 'logs'))
    os.makedirs(params.experiment_dir_name, exist_ok=True)
    params.save_params(os.path.join(params.experiment_dir_name, 'params.json'))
    for e in range(params.num_epoch):
        print("epoch: {}".format(e))
        # global_step advances by one full dataset length per epoch so that
        # per-batch scalars from successive epochs don't overlap.
        train_loss = train(model=model, loader=train_loader, optimizer=optimizer,
                           criterion=criterion, writer=writer,
                           global_step=len(train_loader.dataset) * e,
                           name=f"{params.experiment_dir_name}_by_batch")
        val_loss = validate(model=model, loader=val_loader, criterion=criterion,
                            writer=writer,
                            global_step=len(train_loader.dataset) * e,
                            name=f"{params.experiment_dir_name}_by_batch")
        # Checkpoint name embeds epoch and both losses for easy comparison.
        model_name = f"emd_loss_epoch_{e}_train_{train_loss}_{val_loss}.pth"
        torch.save(model.state_dict(), os.path.join(params.experiment_dir_name, model_name))
        print("Model saved. ")
        writer.add_scalar(f"{params.experiment_dir_name}_by_epoch/train_loss",
                          train_loss, global_step=e)
        writer.add_scalar(f"{params.experiment_dir_name}_by_epoch/val_loss",
                          val_loss, global_step=e)
    print("Training done. ")
    # export_scalars_to_json is a tensorboardX API (removed from
    # torch.utils.tensorboard) — presumably SummaryWriter comes from
    # tensorboardX here; verify the import.
    writer.export_scalars_to_json(
        os.path.join(params.experiment_dir_name, 'all_scalars.json'))
    writer.close()
def start_train(params: TrainParams):
    """Train NIMA for ``params.num_epoch`` epochs, checkpointing every epoch.

    Args:
        params: training configuration (data paths, lr, epoch count,
            experiment directory).

    Side effects: creates ``params.experiment_dir_name`` with TensorBoard
    logs, a ``params.json`` dump, per-epoch ``.pth`` checkpoints and an
    ``all_scalars.json`` export.
    """
    train_loader, val_loader = _create_train_data_part(params=params)
    model = NIMA()
    optimizer = torch.optim.Adam(model.parameters(), lr=params.init_lr)
    criterion = EDMLoss()
    if use_gpu:
        model = model.cuda()
        model = torch.nn.DataParallel(model)
        criterion.cuda()
    writer = SummaryWriter(
        log_dir=os.path.join(params.experiment_dir_name, 'logs'))
    os.makedirs(params.experiment_dir_name, exist_ok=True)
    params.save_params(os.path.join(params.experiment_dir_name, 'params.json'))
    for e in range(1, params.num_epoch + 1):
        train_loss = train(model=model, loader=train_loader,
                           optimizer=optimizer, criterion=criterion)
        val_loss = validate(model=model, loader=val_loader, criterion=criterion)
        print(f"train_loss {train_loss} val_loss = {val_loss}")
        model_name = f"emd_loss_epoch_{e}_train_{train_loss}_{val_loss}.pth"
        # FIX: the original always saved model.module.state_dict(), which raises
        # AttributeError on the CPU path where the model is never wrapped in
        # DataParallel. Unwrap only when the wrapper is present so checkpoints
        # keep the same (unprefixed) key layout in both cases.
        net_to_save = model.module if isinstance(model, torch.nn.DataParallel) else model
        torch.save(net_to_save.state_dict(),
                   os.path.join(params.experiment_dir_name, model_name))
        writer.add_scalar(f"{params.experiment_dir_name}/train_loss",
                          train_loss, global_step=e)
        writer.add_scalar(f"{params.experiment_dir_name}/val_loss",
                          val_loss, global_step=e)
    # NOTE: export_scalars_to_json is a tensorboardX API — presumably
    # SummaryWriter is imported from tensorboardX; verify the import.
    writer.export_scalars_to_json(
        os.path.join(params.experiment_dir_name, 'all_scalars.json'))
    writer.close()
class InferenceModel:
    """NIMA-based scorer: runs the network on a 224x224 image tensor and
    returns the sum of the mean aesthetic score and its std-dev."""

    def __init__(self, device):
        # NOTE(review): self.transform is stored but never applied anywhere in
        # this class — either dead code or a missing step in the predict_from_*
        # paths; confirm against Transform().eval_transform's intended use.
        self.transform = Transform().eval_transform
        # Uses the ImageNet-pretrained backbone; no task-specific checkpoint is
        # loaded here — presumably weights are set elsewhere; verify.
        self.model = NIMA(pretrained_base_model=True)
        self.model = self.model.to(device)
        self.model.eval()

    def predict_from_file(self, image_path):
        """Load an image from disk and score it.

        NOTE(review): default_loader returns a PIL image, but predict() below
        does tensor arithmetic (``image*0.5``) and F.interpolate — this path
        looks broken unless a transform is applied first; confirm.
        """
        image = default_loader(image_path)
        return self.predict(image)

    def predict_from_pil_image(self, image):
        """Score an in-memory PIL image (converted to RGB first).

        NOTE(review): same PIL-vs-tensor concern as predict_from_file.
        """
        image = image.convert('RGB')
        return self.predict(image)

    def predict(self, image):
        """Score a batched image tensor (expected in [-1, 1]); returns
        mean_score + std_score as a single float-like value."""
        image = image*0.5 + 0.5  # rescale from [-1,1] --> [0,1]
        # Resize to the 224x224 input the backbone expects.
        image = F.interpolate(image, size=(224, 224), mode='bilinear')
        with torch.no_grad():
            prob = self.model(image).data.cpu().numpy()[0]
        mean_score = get_mean_score(prob)
        std_score = get_std_score(prob)
        return mean_score + std_score
def __init__(self, device):
    """Create the scorer: pretrained NIMA backbone moved to *device*,
    switched into evaluation mode."""
    self.transform = Transform().eval_transform
    net = NIMA(pretrained_base_model=True).to(device)
    net.eval()
    self.model = net