import os
from typing import Text

# `path_utils`, `GlobalConfig` and `ArtifactStore` are assumed to be
# provided by the surrounding project; they are not standard-library modules.


def resolve_uri_locally(self, artifact_uri: Text, path: Text = None):
    """Takes a URI that points within the artifact store, downloads the
    URI locally, then returns the local URI.

    Args:
        artifact_uri: URI to the artifact.
        path: Optional path to download to. If None, it is inferred.
    """
    if not path_utils.is_remote(artifact_uri):
        # It's already local.
        return artifact_uri

    if path is None:
        path = os.path.join(
            GlobalConfig.get_config_dir(),
            self.unique_id,
            ArtifactStore.get_component_name_from_uri(artifact_uri),
            path_utils.get_parent(artifact_uri)  # unique ID from MLMD
        )

    # Create the target directory if needed, then download the artifact.
    path_utils.create_dir_recursive_if_not_exists(path)
    path_utils.copy_dir(artifact_uri, path, overwrite=True)
    return path
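For context, a minimal usage sketch; `store` (an existing artifact store instance) and the remote URI are hypothetical stand-ins:

# Hypothetical usage: a remote URI is mirrored under the local config
# directory, while an already-local URI is returned unchanged.
local_path = store.resolve_uri_locally('gs://my-bucket/artifacts/Trainer/3')
assert not path_utils.is_remote(local_path)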
import os

import torch
import torch.nn as nn
import torch.optim as optim

# `path_utils` and `binary_acc` are assumed to be provided by the
# surrounding project.


def run_fn(self):
    train_dataset = self.input_fn(self.train_files, self.tf_transform_output)
    eval_dataset = self.input_fn(self.eval_files, self.tf_transform_output)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = self.model_fn(train_dataset, eval_dataset)
    model.to(device)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    model.train()
    for e in range(1, self.epoch + 1):
        epoch_loss = 0
        epoch_acc = 0
        step_count = 0
        for x, y in train_dataset:
            step_count += 1
            x_batch, y_batch = x.to(device), y.to(device)

            optimizer.zero_grad()
            y_pred = model(x_batch)

            loss = criterion(y_pred, y_batch)
            acc = binary_acc(y_pred, y_batch)

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            epoch_acc += acc.item()

        print(f'Epoch {e:03}: | Loss: '
              f'{epoch_loss / step_count:.5f} | Acc: '
              f'{epoch_acc / step_count:.3f}')

    path_utils.create_dir_if_not_exists(self.serving_model_dir)
    if path_utils.is_remote(self.serving_model_dir):
        # Save to a local temp directory first, then copy to the remote dir.
        temp_model_dir = '__temp_model_dir__'
        temp_path = os.path.join(os.getcwd(), temp_model_dir)
        if path_utils.is_dir(temp_path):
            raise PermissionError(
                '{} is used as a temp path but it already exists. '
                'Please remove it to continue.'.format(temp_path))
        path_utils.create_dir_if_not_exists(temp_path)
        torch.save(model, os.path.join(temp_path, 'model.pt'))
        path_utils.copy_dir(temp_path, self.serving_model_dir)
        path_utils.rm_dir(temp_path)
    else:
        torch.save(model, os.path.join(self.serving_model_dir, 'model.pt'))
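The loop above relies on a `binary_acc` helper that is not shown. A plausible sketch, assuming logits as predictions and accuracy reported as a percentage (both assumptions, chosen to match the `BCEWithLogitsLoss` criterion and the print format above):

import torch


def binary_acc(y_pred: torch.Tensor, y_true: torch.Tensor) -> torch.Tensor:
    # Round the sigmoid of the logits to hard 0/1 predictions.
    y_pred_tag = torch.round(torch.sigmoid(y_pred))
    correct = (y_pred_tag == y_true).float().sum()
    # Report accuracy as a percentage of the batch.
    return correct / y_true.shape[0] * 100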
import os

import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from pytorch_lightning import Trainer

# `path_utils` is assumed to be provided by the surrounding project.


def run_fn(self):
    train_dataset = self.input_fn(self.train_files, self.tf_transform_output)
    eval_dataset = self.input_fn(self.eval_files, self.tf_transform_output)

    class LitModel(pl.LightningModule):
        def __init__(self):
            super().__init__()
            self.l1 = torch.nn.Linear(8, 64)
            self.layer_out = torch.nn.Linear(64, 1)

        def forward(self, x):
            x = torch.relu(self.l1(x))
            x = self.layer_out(x)
            return x

        def training_step(self, batch, batch_idx):
            x, y = batch
            y_hat = self(x)
            loss = F.binary_cross_entropy_with_logits(y_hat, y)
            tensorboard_logs = {'train_loss': loss}
            return {'loss': loss, 'log': tensorboard_logs}

        def configure_optimizers(self):
            return torch.optim.Adam(self.parameters(), lr=0.001)

        def train_dataloader(self):
            return train_dataset

        def validation_step(self, batch, batch_idx):
            x, y = batch
            y_hat = self(x)
            return {
                'val_loss': F.binary_cross_entropy_with_logits(y_hat, y)
            }

        def validation_epoch_end(self, outputs):
            avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
            tensorboard_logs = {'val_loss': avg_loss}
            return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}

        def val_dataloader(self):
            return eval_dataset

    model = LitModel()

    # Most basic trainer: uses good defaults.
    trainer = Trainer(
        default_root_dir=self.log_dir,
        max_epochs=self.epoch,
    )
    trainer.fit(model)

    path_utils.create_dir_if_not_exists(self.serving_model_dir)
    if path_utils.is_remote(self.serving_model_dir):
        # Save to a local temp directory first, then copy to the remote dir.
        temp_model_dir = '__temp_model_dir__'
        temp_path = os.path.join(os.getcwd(), temp_model_dir)
        if path_utils.is_dir(temp_path):
            raise PermissionError(
                '{} is used as a temp path but it already exists. '
                'Please remove it to continue.'.format(temp_path))
        trainer.save_checkpoint(os.path.join(temp_path, 'model.ckpt'))
        path_utils.copy_dir(temp_path, self.serving_model_dir)
        path_utils.rm_dir(temp_path)
    else:
        trainer.save_checkpoint(
            os.path.join(self.serving_model_dir, 'model.ckpt'))
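The saved checkpoint can later be restored with Lightning's standard `load_from_checkpoint` classmethod. A minimal sketch, assuming `LitModel` is importable at load time (i.e. defined at module scope rather than inside `run_fn`) and `serving_model_dir` is known:

import os

ckpt_path = os.path.join(serving_model_dir, 'model.ckpt')
model = LitModel.load_from_checkpoint(ckpt_path)
model.eval()  # switch to inference mode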
import os

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter

# `path_utils`, `utils` and `binary_acc` are assumed to be provided by the
# surrounding project.


def run_fn(self):
    train_dataset = self.input_fn(self.train_files, self.tf_transform_output)
    eval_dataset = self.input_fn(self.eval_files, self.tf_transform_output)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    model = self.model_fn(train_dataset, eval_dataset)
    model.to(device)

    criterion = nn.BCEWithLogitsLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    writer = SummaryWriter(self.log_dir)

    model.train()
    total_count = 0
    for e in range(1, self.epochs + 1):
        epoch_loss = 0
        epoch_acc = 0
        step_count = 0
        for x, y, _ in train_dataset:
            step_count += 1
            total_count += 1

            # Concatenate the per-feature tensors into single batch tensors.
            x_batch = torch.cat([v.to(device) for v in x.values()], dim=-1)
            y_batch = torch.cat([v.to(device) for v in y.values()], dim=-1)

            optimizer.zero_grad()
            y_pred = model(x_batch)

            loss = criterion(y_pred, y_batch)
            acc = binary_acc(y_pred, y_batch)

            loss.backward()
            optimizer.step()

            epoch_loss += loss.item()
            epoch_acc += acc.item()

            # Log the graph once on the first step, then per-step metrics.
            if e == 1 and step_count == 1:
                writer.add_graph(model, x_batch)
            writer.add_scalar('training_loss', loss, total_count)
            writer.add_scalar('training_accuracy', acc, total_count)

        print(f'Epoch {e:03}: | Loss: '
              f'{epoch_loss / step_count:.5f} | Acc: '
              f'{epoch_acc / step_count:.3f}')

    # Evaluate on the eval split and persist the results.
    test_results = self.test_fn(model, eval_dataset)
    utils.save_test_results(test_results, self.test_results)

    path_utils.create_dir_if_not_exists(self.serving_model_dir)
    if path_utils.is_remote(self.serving_model_dir):
        # Save to a local temp directory first, then copy to the remote dir.
        temp_model_dir = '__temp_model_dir__'
        temp_path = os.path.join(os.getcwd(), temp_model_dir)
        if path_utils.is_dir(temp_path):
            raise PermissionError(
                '{} is used as a temp path but it already exists. '
                'Please remove it to continue.'.format(temp_path))
        path_utils.create_dir_if_not_exists(temp_path)
        torch.save(model, os.path.join(temp_path, 'model.pt'))
        path_utils.copy_dir(temp_path, self.serving_model_dir)
        path_utils.rm_dir(temp_path)
    else:
        torch.save(model, os.path.join(self.serving_model_dir, 'model.pt'))
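Because the whole `nn.Module` is pickled by `torch.save`, serving code can restore it directly, provided the model class is importable. A minimal sketch; `serving_model_dir` and `features` are hypothetical stand-ins:

import os

import torch

model = torch.load(os.path.join(serving_model_dir, 'model.pt'),
                   map_location='cpu')
model.eval()
with torch.no_grad():
    # `features` is a tensor shaped like `x_batch` in the training loop.
    probs = torch.sigmoid(model(features))

The metrics and graph written via `SummaryWriter` can afterwards be inspected with `tensorboard --logdir <log_dir>`.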