def clone_and_queue(template_task: str, queue: str) -> Task:
    """Clone a template task, optionally retarget its script, and enqueue it.

    Triggered from a GitHub event: reads the event payload pointed to by the
    ``GITHUB_EVENT_PATH`` env var, clones ``template_task`` into a Draft task,
    and — if the triggering comment specifies one — points the clone's script
    at a different branch/tag/commit before enqueuing it on ``queue``.
    Finally posts a confirmation comment back on the originating issue.

    :param template_task: task id of the template to clone.
    :param queue: name of the execution queue to enqueue the clone on.
    :return: the cloned (and now enqueued) task.
    :raises Exception: if the comment supplies a selector type other than
        ``branch``, ``tag`` or ``commit``.
    """
    github_payload = os.getenv('GITHUB_EVENT_PATH')
    with open(github_payload, 'r') as f:
        payload = json.load(f)
    task = Task.get_task(task_id=template_task)
    # Clone the task to pipe to. This creates a task with status Draft whose
    # parameters can be modified.
    cloned_task = Task.clone(source_task=task, name=f"{template_task} cloned task from Github")
    # Comment body format: "<command> <type> <value>"; drop the command word.
    script_commit = payload.get("comment", {}).get("body", "").partition(" ")[2]
    selected_type, _, selected_value = script_commit.partition(" ")
    if selected_type and selected_value:
        if selected_type not in ("branch", "tag", "commit"):
            raise Exception(
                f"You must supply branch, tag or commit as type, not {selected_type}"
            )
        data_script = cloned_task.data.script
        # Exactly one of branch/tag/version_num carries the selector; the
        # other two are cleared so they don't override it.
        data_script.branch = selected_value if selected_type == "branch" else ""
        data_script.tag = selected_value if selected_type == "tag" else ""
        data_script.version_num = selected_value if selected_type == "commit" else ""
        print(f"Change train script head to {selected_value} {selected_type}")
        # noinspection PyProtectedMember
        cloned_task._update_script(script=data_script)
    Task.enqueue(cloned_task.id, queue_name=queue)
    # partition() never raises when "/" is absent (split() would fail to
    # unpack), so the owner/repo guard below is actually reachable.
    owner, _, repo = payload.get("repository", {}).get("full_name", "").partition("/")
    if owner and repo:
        gh = login(token=os.getenv("GITHUB_TOKEN"))
        if gh:
            issue = gh.issue(owner, repo, payload.get("issue", {}).get("number"))
            if issue:
                # BUGFIX: the original referenced undefined `queue_name`
                # (NameError); the parameter is `queue`.
                issue.create_comment(
                    f"New task, id:{cloned_task.id} is in queue {queue}")
            else:
                print(
                    f'can not comment issue, {payload.get("issue", {}).get("number")}'
                )
        else:
            print(f"can not log in to gh, {os.getenv('GITHUB_TOKEN')}")
    return cloned_task
def trains(self, x: data_type, y: data_type = None, x_cv: data_type = None, y_cv: data_type = None, *,
           trains_config: Dict[str, Any] = None, keep_task_open: bool = False,
           queue: str = None) -> "Wrapper":
    """Run training under trains (ClearML) experiment tracking.

    With ``trains_config=None`` this is a plain :meth:`fit`. Otherwise a
    trains ``Task`` is initialized (or, when ``queue`` is given, a template
    task is cloned and enqueued for remote execution instead of running
    locally).

    :param x: training inputs.
    :param y: training targets.
    :param x_cv: validation inputs.
    :param y_cv: validation targets.
    :param trains_config: kwargs for ``Task.init`` / template lookup
        (``project_name``, ``task_name``, ...); ``None`` disables tracking.
    :param keep_task_open: if False, close the task after the local loop.
    :param queue: execution queue name; when given, the run is delegated to
        a remote agent and this method returns before any local training.
    :return: ``self`` (fluent interface).
    """
    if trains_config is None:
        # No tracking requested -> plain local fit.
        return self.fit(x, y, x_cv, y_cv)
    # init trains
    # NOTE: the original re-checked `trains_config is None` here; that branch
    # was unreachable after the early return above and has been removed.
    project_name = trains_config.get("project_name")
    task_name = trains_config.get("task_name")
    if queue is None:
        task = Task.init(**trains_config)
        cloned_task = None
    else:
        task = Task.get_task(project_name=project_name, task_name=task_name)
        cloned_task = Task.clone(source_task=task, parent=task.id)
    # before loop
    self._verbose_level = 6
    self._data_config["verbose_level"] = 6
    self._before_loop(x, y, x_cv, y_cv)
    self.pipeline.use_tqdm = False
    copied_config = shallow_copy_dict(self.config)
    if queue is not None:
        # Remote execution: push config onto the clone and hand off to the
        # queue; nothing runs locally.
        cloned_task.set_parameters(copied_config)
        Task.enqueue(cloned_task.id, queue)
        return self
    # loop
    task.connect(copied_config)
    global trains_logger
    trains_logger = task.get_logger()
    self._loop()
    if not keep_task_open:
        task.close()
        trains_logger = None
    return self
# Value to assign to the parameter in the downstream task.
param['param_name_new_value'] = 3
# The queue where we want the template task (clone) to be sent to
param['execution_queue_name'] = 'default'

# Simulate the work of a Task
print('Processing....')
sleep(2.0)
print('Done processing :)')

# Locate the downstream task we want to pipe into (looked up by name within
# the current project).
pipeline_target = Task.get_task(project_name=task.get_project_name(),
                                task_name=param['next_task_name'])

# Cloning yields a Draft-status task, which is the only state in which its
# parameters may still be edited.
draft_task = Task.clone(source_task=pipeline_target,
                        name='Auto generated cloned task')

# Pull the draft's parameter set, override the single configured parameter,
# and push the edited set back.
draft_params = draft_task.get_parameters()
draft_params[param['param_name']] = param['param_name_new_value']
draft_task.set_parameters(draft_params)

# Enqueue the Task for execution. The enqueued Task must already exist in the
# trains platform.
print('Enqueue next step in pipeline to queue: {}'.format(
    param['execution_queue_name']))
Task.enqueue(draft_task.id, queue_name=param['execution_queue_name'])

# We are done. The next step in the pipe line is in charge of the pipeline now.
print('Done')
# Select base template task
# Notice we can be more imaginative and use task_id which will eliminate the
# need to use project name
template_task = Task.get_task(project_name='examples',
                              task_name='Keras AutoML base')

for experiment_idx in range(total_number_of_experiments):
    # A clone of the template is write-enabled (Draft), so its parameters can
    # be overridden before it runs.
    child_task = Task.clone(source_task=template_task,
                            name=template_task.name + ' {}'.format(experiment_idx),
                            parent=template_task.id)
    # Start from the template's parameters, then draw one random sample per
    # dimension of the search space.
    child_params = child_task.get_parameters()
    for dim, sampler in space.items():
        child_params[dim] = sampler()
    # Write the sampled parameters back into the clone.
    child_task.set_parameters(child_params)
    print('Experiment {} set with parameters {}'.format(
        experiment_idx, child_params))
    # Hand the experiment off to an agent via the execution queue.
    Task.enqueue(child_task.id, queue_name=execution_queue_name)
    print('Experiment id={} enqueue for execution'.format(child_task.id))

# we are done, the next step is to watch the experiments graphs
print('Done')