from clearml import Task
from clearml.automation.controller import PipelineController
from dataclasses import dataclass


@dataclass
class PipeConfig:
    """Pipeline parameters, exposed to ClearML (and remote override) via task.connect()."""

    # ClearML dataset id the pipeline reads from.
    input_dataset_id: str = "86895530658c47a4918bda4f0d92c3e8"
    # Candidate image sizes for the training tasks.
    # FIX: the default value is a tuple, but the field was annotated `set`;
    # annotate it as `tuple` so the type matches the (immutable) default —
    # dataclasses would also reject a genuinely mutable `set` default.
    image_size_values: tuple = (192, 224, 311, 512)


if __name__ == "__main__":
    # force colab to get dataclasses
    Task.add_requirements('dataclasses')

    # Track everything on ClearML Free
    base_project_name = 'R|D?R&D! Webinar 01'
    task = Task.init(
        project_name=base_project_name + "_automations",
        task_name='Pipeline example',
        output_uri=True,  # auto save everything to Clearml Free
    )

    # Register the config so the server UI can override it for remote runs.
    pipe_cfg = PipeConfig()
    task.connect(pipe_cfg, 'pipeline config')

    # possibly control everything from here:
    # train_cfg = FlowerTrainingConfig()
    # aug_cfg = AugConfig()
    # task.connect(train_cfg, 'pipeline config')
    # task.connect(aug_cfg, 'augmentation config')
    # TODO: build a parameter override for training tasks
# NOTE(review): fragment — the opening `if` of this chain (and the function
# enclosing it) lie outside the visible chunk, as do the imports for `tf`,
# `flags`, `FLAGS`, `model_lib_v2` and `Task`.
elif FLAGS.num_workers > 1:
    # More than one worker requested: synchronous multi-machine training.
    strategy = tf.distribute.experimental.MultiWorkerMirroredStrategy()
else:
    # Single worker: mirror the model across the local devices.
    strategy = tf.compat.v2.distribute.MirroredStrategy()

with strategy.scope():
    # Run the TF Object Detection API v2 training loop, driven entirely by
    # the command-line flags defined below.
    model_lib_v2.train_loop(
        pipeline_config_path=FLAGS.pipeline_config_path,
        model_dir=FLAGS.model_dir,
        train_steps=FLAGS.num_train_steps,
        use_tpu=FLAGS.use_tpu,
        checkpoint_every_n=FLAGS.checkpoint_every_n,
        record_summaries=FLAGS.record_summaries)

if __name__ == '__main__':
    # Pin the exact runtime dependencies the ClearML agent must install for a
    # remote run: TF 2.2, a prebuilt object_detection wheel, and this package.
    Task.add_requirements('tensorflow==2.2')
    Task.add_requirements(
        '/data/cv_ml_models/ai-calibration/packages/object_detection-0.1-py3-none-any.whl'
    )
    Task.add_requirements('.')
    # Register this run with the ClearML server.
    task = Task.init(project_name='Clearml Tests', task_name='Train raccoon model')
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
    flags.DEFINE_string('pipeline_config_path', None, 'Path to pipeline config '
                        'file.')
    flags.DEFINE_integer('num_train_steps', None, 'Number of train steps.')
    # NOTE(review): chunk is truncated here — this DEFINE_bool call continues
    # past the visible source.
    flags.DEFINE_bool(
# NOTE(review): fragment — this chunk starts inside a model method (the
# `def`/class header with `self`, `effnet`, `out`, `dropout`, `monitor_metrics`
# is outside the visible source, as are the `F`, `nn`, `torch`,
# `FlowerTrainingConfig` imports).
batch_size, _, _, _ = image.shape  # assumes image is 4-D (batch, C, H, W) — TODO confirm
x = self.effnet.extract_features(image)
# Global-average-pool the feature map down to one vector per sample.
x = F.adaptive_avg_pool2d(x, 1).reshape(batch_size, -1)
outputs = self.out(self.dropout(x))
if targets is not None:
    # Training path: also compute the loss and the monitored metrics.
    loss = nn.CrossEntropyLoss()(outputs, targets)
    metrics = self.monitor_metrics(outputs, targets)
    return outputs, loss, metrics
# Inference path: keep the same 3-tuple shape with dummy loss/metrics.
return outputs, 0, {}


if __name__ == "__main__":
    # force colab to get dataclasses
    # <---
    Task.add_requirements('dataclasses')
    # override numpy version for colab
    Task.add_requirements('numpy', '1.19.5')

    # Track everything on ClearML Free
    task = Task.init(
        project_name='R|D?R&D! Webinar 01',
        task_name='remote control interface',
        output_uri=True,  # auto save everything to Clearml Free
    )
    # Register the training config so the server UI can override it remotely.
    # task.connect(FlowerTrainingConfig, 'config')
    cfg = FlowerTrainingConfig()
    task.connect(cfg, 'config')
    # <---

    # Need to run on cpu only?
    device = "cuda" if torch.cuda.is_available() else "cpu"
import pandas as pd
import plotly.express as px
from clearml import Task, Dataset
# FIX: @dataclass is used below but no import was visible in this chunk;
# importing it here is harmless even if it is also imported elsewhere.
from dataclasses import dataclass


@dataclass
class EDAConf:
    """EDA parameters, exposed to ClearML (and remote override) via task.connect()."""

    # Task id holding the dataset-metadata artifact produced upstream.
    dataset_metadata_id: str = "5b3da654bb1c4b9c81acfcf4d75063ea"
    # Name of the artifact on that task.
    dataset_metadata_artifact_name: str = 'dataset_metadata'
    # put graphics options here ...


if __name__ == '__main__':
    # Packages the ClearML agent must install for a remote run.
    # FIX: 'dataclasses' was previously requested twice with conflicting
    # specs (once unpinned, once pinned to '0.4'); keep a single unpinned
    # request, consistent with the sibling scripts in this project.
    Task.add_requirements('dataclasses')  # force colab to get dataclasses
    Task.add_requirements('plotly')
    # override versions for colab
    Task.add_requirements('pandas', '1.1.5')
    Task.add_requirements('numpy', '1.19.5')

    # Track everything on ClearML Free
    task = Task.init(
        project_name='R|D?R&D! Webinar 01',
        task_name='EDA example',
        output_uri=True,  # auto save everything to Clearml Free
    )
    cfg = EDAConf()
    task.connect(cfg, 'EDA Config')