def do(self, workflow_dict):
    """Reboot any workflow host still named localhost.localdomain, then
    re-run puppet-setup on it.

    Hosts whose hostname was never set by provisioning are detected via a
    remote grep, force-rebooted, and puppet-setup is retried up to 10
    times (30s apart). Returns True on success; on failure records
    DBAAS_0007 in workflow_dict['exceptions'] and returns False.
    """
    hosts_to_reboot = []
    try:
        script = "hostname | grep 'localhost.localdomain' | wc -l"
        for host in workflow_dict['hosts']:
            output = {}
            return_code = exec_remote_command_host(host, script, output)
            if return_code != 0:
                raise Exception(str(output))
            ret_value = int(output['stdout'][0])
            if ret_value >= 1:
                LOG.info(
                    "VM {} hostname is localhost.localdomain".format(host))
                hosts_to_reboot.append(host)
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0007)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
    if not hosts_to_reboot:
        return True
    script = '/sbin/reboot -f > /dev/null 2>&1 &'
    for host in hosts_to_reboot:
        LOG.info("Rebooting {}...".format(host))
        output = {}
        try:
            exec_remote_command_host(host, script, output)
        except Exception:
            # Best-effort: the forced reboot drops the SSH connection, so
            # errors here are expected and ignored. (Was a bare `except:`,
            # which also swallowed SystemExit/KeyboardInterrupt.)
            pass
    script = 'puppet-setup'
    for host in hosts_to_reboot:
        output = {}
        for attempt in range(1, 11):
            LOG.info("Running puppet {} - Attempt {}/10...".format(
                host, attempt))
            try:
                return_code = exec_remote_command_host(
                    host, script, output)
                if return_code != 0:
                    raise EnvironmentError
            except Exception:
                LOG.info(
                    'Could not execute puppet-setup retrying. {}'.format(
                        output))
                sleep(30)
            else:
                break
        else:
            # for/else: all 10 attempts failed for this host.
            workflow_dict['exceptions']['error_codes'].append(DBAAS_0007)
            workflow_dict['exceptions']['traceback'].append(
                'Could not execute puppet-setup in {} - {}'.format(
                    host, output))
            return False
    return True
def do(self, workflow_dict):
    """Unmount /data on the workflow host and wipe data dirs on every
    non-primary host.

    Returns True on success; records DBAAS_0021 and returns False on error.
    """
    try:
        sleep(10)  # give the host a moment to settle before unmounting
        host = workflow_dict['host']
        umount_output = {}
        umount_code = exec_remote_command_host(
            host, 'umount /data', umount_output)
        if umount_code != 0:
            raise Exception(str(umount_output))
        not_primaries = workflow_dict['not_primary_hosts']
        if len(not_primaries) >= 1:
            for secondary in not_primaries:
                wipe_output = {}
                wipe_code = exec_remote_command_host(
                    secondary, 'rm -rf /data/data/*', wipe_output)
                if wipe_code != 0:
                    raise Exception(str(wipe_output))
        return True
    except Exception:
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0021)
        workflow_dict['exceptions']['traceback'].append(full_stack())
        return False
def reverse_ip(self):
    """Return the reverse-DNS (PTR) name for this host's address, or None.

    Scans the whole nslookup output instead of a hard-coded line index
    (`stdout[3]`), which breaks when nslookup's output layout varies;
    this matches the joined-stdout variant used elsewhere in the project.
    """
    output = {}
    script = 'nslookup {}'.format(self.host.address)
    exec_remote_command_host(self.host, script, output)
    ret = ''.join(output['stdout'])
    if 'name = ' not in ret:
        return None
    return ret.split('name = ')[1].split('.\n')[0]
def puppet_code_status(self):
    """Return (puppet-setup exit code, raw log output) from ks-post.log.

    Returns (-1, output) when no "return code:" marker is found.
    """
    log_output = {}
    exec_remote_command_host(
        self.host, "tail -7 /var/log/ks-post.log", log_output, True)
    marker = "return code: "
    for log_line in log_output["stdout"]:
        if "puppet-setup" in log_line and "return code:" in log_line:
            return int(log_line.split(marker)[1]), log_output
    return -1, log_output
def puppet_code_status(self):
    """Extract puppet-setup's exit code from the tail of ks-post.log.

    Returns a (code, output) tuple; code is -1 if no marker line exists.
    """
    result = {}
    exec_remote_command_host(
        self.host, "tail -7 /var/log/ks-post.log", result, True)
    for entry in result["stdout"]:
        has_marker = "puppet-setup" in entry and "return code:" in entry
        if has_marker:
            code_text = entry.split("return code: ")[1]
            return int(code_text), result
    return -1, result
def reverse_ip(self):
    """Resolve this host's reverse-DNS name, or None when absent."""
    result = {}
    exec_remote_command_host(
        self.host, 'nslookup {}'.format(self.host.address), result)
    stdout_text = ''.join(result['stdout'])
    if 'name = ' not in stdout_text:
        return None
    after_marker = stdout_text.split('name = ')[1]
    return after_marker.split('.\n')[0]
def get_puppet_code_status(self, host):
    """Fetch puppet-setup's exit code from the tail of /var/log/ks-post.log.

    Returns a (code, output) tuple.

    NOTE(review): unlike the similar puppet_code_status helpers elsewhere
    in this file (which return -1), this returns 0 (success) when no
    "return code:" line is found -- callers treat a missing marker as OK;
    confirm that is intentional.
    """
    output = {}
    LOG.info("Puppet-setup LOG info:")
    exec_remote_command_host(host, "tail -7 /var/log/ks-post.log", output)
    for line in output["stdout"]:
        if "puppet-setup" in line and "return code:" in line:
            return int(line.split("return code: ")[1]), output
    return 0, output
def switch_master(self, instance=None):
    """Force a failover through the first filtered Sentinel instance.

    `instance` is accepted for interface compatibility but not used here;
    Sentinel elects the new master itself via `SENTINEL failover`.

    Raises:
        Exception: when the remote redis-cli script exits non-zero.
    """
    sentinel_instance = self.instances_filtered.first()
    host = sentinel_instance.hostname
    # Shell preamble: abort the script on any non-zero exit status.
    script = """
#!/bin/bash

die_if_error()
{
    local err=$?
    if [ "$err" != "0" ];
    then
        echo "$*"
        exit $err
    fi
}"""
    script += """
/usr/local/redis/src/redis-cli -h {} -p {} <<EOF_DBAAS
SENTINEL failover {}
exit \nEOF_DBAAS
die_if_error "Error reseting sentinel"
""".format(
        sentinel_instance.address,
        sentinel_instance.port,
        self.databaseinfra.name
    )
    script = build_context_script({}, script)
    output = {}
    return_code = exec_remote_command_host(host, script, output)
    LOG.info(output)
    if return_code != 0:
        raise Exception(str(output))
def run_vm_script(workflow_dict, context_dict, script, reverse=False, wait=0):
    """Render and run `script` on every instance host, optionally in
    reverse order, sleeping `wait` seconds after each run.

    Returns True on success; records DBAAS_0015 and returns False on error.
    """
    try:
        details = workflow_dict['instances_detail']
        context = dict(
            context_dict.items() +
            workflow_dict['initial_context_dict'].items())  # py2 dict merge
        ordered = details[::-1] if reverse else details
        for detail in ordered:
            host = detail['instance'].hostname
            context['IS_MASTER'] = detail['is_master']
            command = build_context_script(context, script)
            result = {}
            if exec_remote_command_host(host, command, result):
                raise Exception(
                    "Could not run script. Output: {}".format(result))
            sleep(wait)
        return True
    except Exception:
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(full_stack())
        return False
def switch_master(self, instance=None):
    """Ask the first Sentinel instance to fail over this infra's master.

    `instance` is unused (kept for a uniform driver interface); the
    failover target is chosen by Sentinel.

    Raises:
        Exception: when redis-cli exits with a non-zero status.
    """
    sentinel_instance = self.instances_filtered.first()
    host = sentinel_instance.hostname
    # Bash helper that aborts the script if the previous command failed.
    script = """
#!/bin/bash

die_if_error()
{
    local err=$?
    if [ "$err" != "0" ];
    then
        echo "$*"
        exit $err
    fi
}"""
    script += """
/usr/local/redis/src/redis-cli -h {} -p {} <<EOF_DBAAS
SENTINEL failover {}
exit \nEOF_DBAAS
die_if_error "Error reseting sentinel"
""".format(sentinel_instance.address, sentinel_instance.port,
           self.databaseinfra.name)
    script = build_context_script({}, script)
    output = {}
    return_code = exec_remote_command_host(host, script, output)
    LOG.info(output)
    if return_code != 0:
        raise Exception(str(output))
def exec_script(self, script):
    """Run `script` on this host; raise EnvironmentError on failure.

    Returns the remote output dict on success.
    """
    result = {}
    status = exec_remote_command_host(self.host, script, result)
    if status != 0:
        raise EnvironmentError(str(result))
    LOG.info("output: {}".format(result))
    return result
def exec_script(self, script):
    """Execute `script` remotely and return its output dict.

    Raises EnvironmentError when the command exits non-zero.
    """
    captured = {}
    exit_code = exec_remote_command_host(self.host, script, captured)
    if exit_code == 0:
        LOG.info("output: {}".format(captured))
        return captured
    raise EnvironmentError(str(captured))
def execute_script(self, script):
    """Run `script` remotely; raise EnvironmentError if it exits non-zero."""
    result = {}
    status = exec_remote_command_host(self.host, script, result)
    if status == 0:
        return
    raise EnvironmentError(
        'Could not execute script {}: {}'.format(status, result))
def host_mount_data_percentage(address, task):
    """Report /data mount usage for the host at `address`.

    Returns (percentage, used_kb, total_kb), or (None, None, None) when
    the remote df command fails. Progress is logged to `task`.

    NOTE(review): assumes the grepped df line starts with the size
    columns (no filesystem name) -- confirm against actual df output.
    """
    host = Host.objects.filter(address=address).first()
    df_output = {}
    status = exec_remote_command_host(host, 'df -hk | grep /data', df_output)
    if status != 0:
        task.add_detail(
            message='Could not load mount size: {}'.format(df_output),
            level=4)
        return None, None, None
    fields = df_output['stdout'][0].strip().split()
    total_kb = int(fields[0])
    used_kb = int(fields[1])
    free_kb = int(fields[2])  # parsed for validation; not returned
    percentage = int(fields[3].replace('%', ''))
    task.add_detail(
        message='Mount /data: {}% ({}kb/{}kb)'.format(
            percentage, used_kb, total_kb),
        level=3)
    return percentage, used_kb, total_kb
def host_mount_data_percentage(address, task):
    """Return /data usage for `address` as (percentage, used_kb, total_kb).

    On df failure, logs a level-4 detail to `task` and returns
    (None, None, None).
    """
    host = Host.objects.filter(address=address).first()
    captured = {}
    code = exec_remote_command_host(host, 'df -hk | grep /data', captured)
    if code != 0:
        task.add_detail(
            message='Could not load mount size: {}'.format(captured),
            level=4)
        return None, None, None
    columns = captured['stdout'][0].strip().split()
    total = int(columns[0])
    used = int(columns[1])
    int(columns[2])  # free column: parsed (validated) but unused
    percentage = int(columns[3].replace('%', ''))
    task.add_detail(
        message='Mount /data: {}% ({}kb/{}kb)'.format(
            percentage, used, total),
        level=3)
    return percentage, used, total
def update_fstab(host, source_export_path, target_export_path):
    """Rewrite /etc/fstab on `host`, replacing the source export path
    with the target export path.

    Uses '#' as the sed substitution delimiter: export paths contain '/',
    which with the usual s/.../.../ form terminates the pattern early and
    makes sed fail.

    Returns (return_code, output dict) from the remote command.
    """
    command = """sed -i s#"{}"#"{}"#g /etc/fstab""".format(
        source_export_path, target_export_path
    )
    output = {}
    return_code = exec_remote_command_host(host, command, output)
    return return_code, output
def run_vm_script(workflow_dict, context_dict, script):
    """Render `script` with per-instance host/port context and run it on
    each instance host.

    Returns True on success; records DBAAS_0015 and returns False on error.
    """
    try:
        context = dict(
            context_dict.items() +
            workflow_dict['initial_context_dict'].items())  # py2 dict merge
        for detail in workflow_dict['instances_detail']:
            instance = detail['instance']
            context['HOSTADDRESS'] = instance.address
            context['PORT'] = instance.port
            command = build_context_script(context, script)
            result = {}
            if exec_remote_command_host(instance.hostname, command, result):
                raise Exception(
                    "Could not run script. Output: {}".format(result))
        return True
    except Exception:
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0015)
        workflow_dict['exceptions']['traceback'].append(full_stack())
        return False
def is_running_bootstrap(self):
    """True while the bootstrap-puppet3-loop.sh process is alive on the host."""
    result = {}
    command = (
        "ps -ef | grep bootstrap-puppet3-loop.sh | grep -v grep | wc -l")
    status = exec_remote_command_host(self.host, command, result, True)
    if status != 0:
        raise EnvironmentError(str(result))
    return int(result['stdout'][0]) > 0
def has_bootstrap_started(self):
    """True once /var/log/ks-post.log has at least one line on the host."""
    result = {}
    status = exec_remote_command_host(
        self.host, "cat /var/log/ks-post.log | wc -l", result, True)
    if status != 0:
        raise EnvironmentError(str(result))
    return int(result['stdout'][0]) > 0
def has_bootstrap_started(self):
    """Check whether the kickstart post log exists and is non-empty."""
    captured = {}
    line_count_cmd = "cat /var/log/ks-post.log | wc -l"
    exit_code = exec_remote_command_host(
        self.host, line_count_cmd, captured, True)
    if exit_code != 0:
        raise EnvironmentError(str(captured))
    lines = int(captured['stdout'][0])
    return lines > 0
def is_hostname_valid(self):
    """True when the host's hostname is not localhost.localdomain."""
    result = {}
    command = "hostname | grep 'localhost.localdomain' | wc -l"
    status = exec_remote_command_host(self.host, command, result)
    if status != 0:
        raise EnvironmentError(str(result))
    return int(result['stdout'][0]) < 1
def is_hostname_valid(self):
    """Report whether provisioning has set a real hostname on the host.

    Greps for the default 'localhost.localdomain' name; valid means zero
    matches.
    """
    captured = {}
    grep_cmd = "hostname | grep 'localhost.localdomain' | wc -l"
    exit_code = exec_remote_command_host(self.host, grep_cmd, captured)
    if exit_code != 0:
        raise EnvironmentError(str(captured))
    matches = int(captured['stdout'][0])
    return matches < 1
def run_script(self, script, host=None):
    """Execute `script` on `host` (default: self.host) and return output.

    Raises EnvironmentError when the script exits non-zero.
    """
    target = host or self.host
    result = {}
    status = exec_remote_command_host(target, script, result)
    if status != 0:
        raise EnvironmentError(
            'Could not execute script {}: {}'.format(status, result))
    return result
def is_running_bootstrap(self):
    """Check whether the puppet bootstrap loop is still in the process list."""
    captured = {}
    ps_cmd = "ps -ef | grep bootstrap-puppet3-loop.sh | grep -v grep | wc -l"
    exit_code = exec_remote_command_host(self.host, ps_cmd, captured, True)
    if exit_code != 0:
        raise EnvironmentError(str(captured))
    process_count = int(captured['stdout'][0])
    return process_count > 0
def agents_command(self, host, command):
    """Invoke `command` on each database agent's init script on `host`."""
    from util import exec_remote_command_host
    for agent in self.get_database_agents():
        agent_script = '/etc/init.d/{} {}'.format(agent, command)
        agent_output = {}
        code = exec_remote_command_host(host, agent_script, agent_output)
        message = 'Running {} - Return Code: {}. Output script: {}'.format(
            agent_script, code, agent_output)
        LOG.info(message)
def _execute_init_script(self, command):
    """Run the driver initialization script with option=`command`.

    The script path is resolved against the migrating instance's host
    when a host migration is in progress, otherwise against self.host.
    Returns (return_code, output dict).
    """
    if self.host_migrate:
        base_host = self.instance.hostname
    else:
        base_host = self.host
    script = self.driver.initialization_script_path(base_host)
    script = script.format(option=command) + ' > /dev/null'
    result = {}
    code = exec_remote_command_host(self.host, script, result)
    return code, result
def _execute_script(self, script_variables, script):
    """Render `script` with `script_variables` and run it on self.host.

    Raises EnvironmentError when the rendered script exits non-zero.
    """
    rendered = build_context_script(script_variables, script)
    result = {}
    status = exec_remote_command_host(self.host, rendered, result)
    if status == 0:
        return
    raise EnvironmentError(
        'Could not execute replica script {}: {}'.format(status, result))
def undo(self, workflow_dict):
    """Delete database files on every workflow host.

    Never raises outward: records DBAAS_0014 and returns False on error.
    """
    try:
        LOG.info("Remove all database files")
        for host in workflow_dict['hosts']:
            LOG.info("Removing database files on host %s" % host)
            exec_remote_command_host(
                host, "/opt/dbaas/scripts/dbaas_deletedatabasefiles.sh")
        return True
    except Exception:
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0014)
        workflow_dict['exceptions']['traceback'].append(full_stack())
        return False
def run_script(self, script):
    """Run `script` once (no retry) with the driver's PTY default.

    Returns (return_code, output dict).
    """
    result = {}
    code = exec_remote_command_host(
        host=self.host,
        command=script,
        output=result,
        retry=False,
        get_pty=self.driver.get_start_pty_default())
    return code, result
def do(self):
    """Run each configured script on the host, failing fast on error.

    Skips entirely when the step is not valid.
    """
    if not self.is_valid:
        return
    for message, script in self.scripts.items():
        result = {}
        status = exec_remote_command_host(self.host, script, result)
        if status != 0:
            raise EnvironmentError(
                '{} - {}: {}'.format(message, status, result))
def run_script(self, script, host=None):
    """Run `script` remotely and return the output dict.

    Targets `host` when given, else self.host; raises EnvironmentError
    on a non-zero exit status.
    """
    destination = self.host if host is None else host
    captured = {}
    exit_code = exec_remote_command_host(destination, script, captured)
    if exit_code == 0:
        return captured
    raise EnvironmentError(
        'Could not execute script {}: {}'.format(exit_code, captured))
def run_script(self, plan_script):
    """Render the plan script with this step's variables and execute it
    on the configured script host; return the output dict.
    """
    rendered = build_context_script(self.script_variables, plan_script)
    result = {}
    status = exec_remote_command_host(self.run_script_host, rendered, result)
    if status != 0:
        raise EnvironmentError(
            'Could not execute script {}: {}'.format(status, result))
    return result
def use_database_initialization_script(databaseinfra, host, option):
    """Run the infra's init script on `host` with `option` (start/stop/...).

    Returns (return_code, output dict).
    """
    driver = databaseinfra.get_driver()
    template = driver.initialization_script_path(host)
    command = template.format(option=option) + ' > /dev/null'
    result = {}
    code = exec_remote_command_host(host, command, result)
    return code, result
def run_script(self, script, host=None):
    """Deprecated: always raises; use HostSSH.run_script instead."""
    raise Exception(
        "U must use the new method. run_script of HostSSH class")
    # Unreachable legacy implementation kept below for reference.
    from util import exec_remote_command_host
    output = {}
    return_code = exec_remote_command_host(host or self.host, script, output)
    if return_code != 0:
        raise EnvironmentError('Could not execute script {}: {}'.format(
            return_code, output))
    return output
def get_replication_information_from_file(host):
    """Read the saved master binlog file/position from the host's datadir.

    Returns (replication_file, replication_position); raises Exception
    when the file cannot be read remotely.
    """
    result = {}
    status = exec_remote_command_host(
        host, 'cat /data/data/mysql_binlog_master_file_pos', result)
    if status != 0:
        raise Exception("Could not read file: {}".format(result))
    rep_file, rep_pos = parse_replication_info(result['stdout'][0])
    return rep_file, rep_pos
def change_slave_priority_file(host, original_value, final_value):
    """Swap the slave-priority value in /data/redis.conf on `host`.

    Runs a remote sed replacing `original_value` with `final_value`;
    raises Exception when the remote command exits non-zero.
    """
    script = test_bash_script_error()
    script += """
    sed -i 's/slave-priority {}/slave-priority {}/g' /data/redis.conf
    """.format(original_value, final_value)
    script = build_context_script({}, script)
    output = {}
    return_code = exec_remote_command_host(host, script, output)
    LOG.info(output)
    if return_code != 0:
        raise Exception(str(output))
def mysql_binlog_save(client, instance):
    """Persist the current master binlog file/position into the datadir.

    Best-effort: any failure is logged and swallowed so that the caller
    (backup flow) can proceed.
    """
    try:
        client.query('show master status')
        status_rows = client.store_result().fetch_row(maxrows=0, how=1)
        binlog_file = status_rows[0]['File']
        binlog_pos = status_rows[0]['Position']
        client.query("show variables like 'datadir'")
        datadir_rows = client.store_result().fetch_row(maxrows=0, how=1)
        datadir = datadir_rows[0]['Value']
        command = 'echo "master=%s;position=%s" > %smysql_binlog_master_file_pos' % (
            binlog_file, binlog_pos, datadir)
        exec_remote_command_host(instance.hostname, command, {})
    except Exception as e:
        LOG.error(
            "Error saving mysql master binlog file and position: {}".format(e))
def agents_command(self, host, command):
    """Run `command` against every database agent init script on `host`,
    logging each result."""
    from util import exec_remote_command_host
    agents = self.get_database_agents()
    for agent in agents:
        init_call = '/etc/init.d/{} {}'.format(agent, command)
        call_output = {}
        status = exec_remote_command_host(host, init_call, call_output)
        LOG.info(
            'Running {} - Return Code: {}. Output script: {}'.format(
                init_call, status, call_output))
def do(self, workflow_dict):
    """Wait for puppet-setup to finish on each workflow host, then verify
    its exit code.

    Polls the process list every `interval` seconds, up to `retries`
    attempts, until the bootstrap loop disappears; then reads the
    puppet-setup return code from the log. Returns True on success;
    records DBAAS_0013 and returns False on any failure.

    NOTE(review): the `attempt == retries` check fires after 59 polls,
    not 60 -- confirm the intended attempt count.
    """
    try:
        script = "ps -ef | grep bootstrap-puppet3-loop.sh | grep -v grep | wc -l"
        for host in workflow_dict['hosts']:
            LOG.info("Getting vm credentials...")
            attempt = 1
            retries = 60
            interval = 20
            sleep(interval)  # give puppet a head start before first check
            while True:
                LOG.info(
                    "Check if puppet-setup is running on {} - attempt {} of {}"
                    .format(host, attempt, retries))
                output = {}
                return_code = exec_remote_command_host(
                    host, script, output)
                if return_code != 0:
                    raise Exception(str(output))
                ret_value = int(output['stdout'][0])
                if ret_value == 0:
                    # Bootstrap loop no longer in the process list.
                    LOG.info(
                        "Puppet-setup is not running on {}".format(host))
                    break
                LOG.info("Puppet-setup is running on {}".format(host))
                attempt += 1
                if attempt == retries:
                    error = "Maximum number of attempts check is puppet is running on {}.".format(
                        host)
                    LOG.error(error)
                    raise Exception(error)
                sleep(interval)
            # Puppet finished; now confirm it exited cleanly.
            puppet_code_status, output = self.get_puppet_code_status(host)
            if puppet_code_status != 0:
                message = "Puppet-setup returned an error on {}. Output: {}".format(
                    host, output)
                raise EnvironmentError(message)
        return True
    except Exception:
        traceback = full_stack()
        workflow_dict['exceptions']['error_codes'].append(DBAAS_0013)
        workflow_dict['exceptions']['traceback'].append(traceback)
        return False
def check_access(origin, destiny, port):
    """Verify TCP connectivity from `origin` to `destiny`:`port`.

    Uses bash's /dev/tcp pseudo-device on the origin host. No-op when
    origin and destiny are the same; raises EnvironmentError on failure.
    """
    if origin == destiny:
        return
    probe = "(echo >/dev/tcp/{}/{}) &>/dev/null && exit 0 || exit 1".format(
        destiny.address, port)
    result = {}
    if exec_remote_command_host(origin, probe, result) != 0:
        raise EnvironmentError(
            'Could not connect from {} to {}:{} - Error: {}'.format(
                origin.address, destiny.address, port, str(result)))
def mysql_binlog_save(client, instance):
    """Write the master binlog file/position to the datadir and sync disk.

    Best-effort: errors are logged, never raised, so backups continue.
    """
    try:
        client.query('show master status')
        master_rows = client.store_result().fetch_row(maxrows=0, how=1)
        binlog_file = master_rows[0]['File']
        binlog_pos = master_rows[0]['Position']
        client.query("show variables like 'datadir'")
        var_rows = client.store_result().fetch_row(maxrows=0, how=1)
        datadir = var_rows[0]['Value']
        command = 'echo "master=%s;position=%s" > %smysql_binlog_master_file_pos && sync' % (
            binlog_file, binlog_pos, datadir
        )
        exec_remote_command_host(instance.hostname, command, {})
    except Exception as e:
        LOG.error(
            "Error saving mysql master binlog file and position: {}".format(e))
def do(self):
    """Start monit on the host; failures are logged, not raised."""
    LOG.info("Start monit on host {}".format(self.host))
    action = 'start'
    script = test_bash_script_error() + monit_script(action)
    LOG.info(script)
    result = {}
    status = exec_remote_command_host(self.host, script, result)
    LOG.info(result)
    if status != 0:
        LOG.error("Error starting monit")
        LOG.error(str(result))
def _execute_script(self, script_variables, script):
    """Build the replica script from `script_variables` and run it remotely.

    Raises EnvironmentError when the remote execution fails.
    """
    final_script = build_context_script(script_variables, script)
    captured = {}
    exit_code = exec_remote_command_host(self.host, final_script, captured)
    if exit_code != 0:
        raise EnvironmentError(
            'Could not execute replica script {}: {}'.format(
                exit_code, captured))
def do(self):
    """Run the oplogSize change script against the instance's admin port.

    Temporarily points the instance at port 27018, executes the generated
    script, then restores the original port.
    """
    from physical.models import DatabaseInfraParameter
    self.instance.old_port = self.instance.port
    self.instance.port = 27018
    oplogsize = DatabaseInfraParameter.objects.get(
        databaseinfra=self.infra, parameter__name='oplogSize')
    change_script = build_change_oplogsize_script(
        instance=self.instance, oplogsize=oplogsize.value)
    result = {}
    status = exec_remote_command_host(self.host, change_script, result)
    if status != 0:
        raise Exception(str(result))
    self.instance.port = self.instance.old_port
def check_access(origin, destiny, port):
    """Check that `origin` can open a TCP connection to `destiny`:`port`.

    Does nothing when both hosts are the same; otherwise runs a bash
    /dev/tcp probe on origin and raises EnvironmentError on failure.
    """
    if origin == destiny:
        return
    template = "(echo >/dev/tcp/{}/{}) &>/dev/null && exit 0 || exit 1"
    probe = template.format(destiny.address, port)
    captured = {}
    exit_code = exec_remote_command_host(origin, probe, captured)
    if exit_code == 0:
        return
    raise EnvironmentError(
        'Could not connect from {} to {}:{} - Error: {}'.format(
            origin.address, destiny.address, port, str(captured)))
def failover_sentinel(host, sentinel_host, sentinel_port, service_name):
    """Ask the Sentinel at sentinel_host:sentinel_port to fail over
    `service_name`, running redis-cli from `host`.

    Raises:
        Exception: when the remote script exits non-zero.
    """
    LOG.info('Failover of Sentinel {}:{}'.format(sentinel_host, sentinel_port))
    script = test_bash_script_error()
    script += """
/usr/local/redis/src/redis-cli -h {} -p {} <<EOF_DBAAS
SENTINEL failover {}
exit \nEOF_DBAAS
die_if_error "Error reseting sentinel"
""".format(sentinel_host, sentinel_port, service_name)
    script = build_context_script({}, script)
    output = {}
    return_code = exec_remote_command_host(host, script, output)
    LOG.info(output)
    if return_code != 0:
        raise Exception(str(output))
def is_os_process_running(self, process_name):
    """Poll for `process_name` on the host, returning False as soon as it
    disappears.

    Checks up to CHECK_ATTEMPTS times, sleeping CHECK_SECONDS between
    checks; returns True only if the process was present every time.
    """
    command = "ps -ef | grep {} | grep -v grep | wc -l".format(process_name)
    for _ in range(CHECK_ATTEMPTS):
        result = {}
        if exec_remote_command_host(self.host, command, result) != 0:
            raise Exception(str(result))
        if int(result['stdout'][0]) == 0:
            return False
        LOG.info("{} is running".format(process_name))
        sleep(CHECK_SECONDS)
    return True
def switch_master(self, instance=None):
    """Fail over a Redis Cluster master to its slave via CLUSTER FAILOVER.

    Raises:
        Exception: if `instance` is None, if it has no slave, or if the
            remote redis-cli script exits non-zero.
    """
    if instance is None:
        raise Exception(
            'Cannot switch master in a redis cluster without instance.')
    slave_instance = self.get_slave_for(instance)
    if not slave_instance:
        raise Exception('There is no slave for {}'.format(instance))
    host = slave_instance.hostname
    # Shell preamble: abort on any non-zero exit status.
    script = """
#!/bin/bash

die_if_error()
{
    local err=$?
    if [ "$err" != "0" ];
    then
        echo "$*"
        exit $err
    fi
}"""
    script += """
/usr/local/redis/src/redis-cli -h {} -p {} -a {} -c<<EOF_DBAAS
CLUSTER FAILOVER
exit \nEOF_DBAAS
die_if_error "Error executing cluster failover"
""".format(
        slave_instance.address,
        slave_instance.port,
        self.databaseinfra.password
    )
    script = build_context_script({}, script)
    output = {}
    return_code = exec_remote_command_host(host, script, output)
    LOG.info(output)
    if return_code != 0:
        raise Exception(str(output))
def fqdn(self):
    """Return the host's fully-qualified domain name via `hostname -f`."""
    result = {}
    exec_remote_command_host(self.host, 'hostname -f', result)
    return result['stdout'][0].strip()
def run_script(self, script):
    """Run `script` on self.host; return (return_code, output dict)."""
    result = {}
    status = exec_remote_command_host(self.host, script, result)
    return status, result
def do(self):
    """Run puppet-setup on the host, raising EnvironmentError on failure."""
    result = {}
    status = exec_remote_command_host(self.host, 'puppet-setup', result)
    if status != 0:
        raise EnvironmentError(str(result))
def make_instance_snapshot_backup(instance, error, group,
                                  provider_class=VolumeProviderBase):
    """Take a volume snapshot of `instance`, sizing it and optionally
    copying it to the database's configured backup path.

    Locks the instance (best effort) while the snapshot is taken; a
    failed lock downgrades the snapshot status to WARNING rather than
    aborting. On snapshot failure, records the message in `error` and on
    the snapshot, then returns the (errored) snapshot.

    Returns the Snapshot instance in all cases.
    """
    LOG.info("Make instance backup for {}".format(instance))
    provider = provider_class(instance)
    infra = instance.databaseinfra
    database = infra.databases.first()
    snapshot = Snapshot.create(instance, group, provider.volume)
    snapshot_final_status = Snapshot.SUCCESS
    locked = None
    driver = infra.get_driver()
    client = None
    try:
        client = driver.get_client(instance)
        locked = lock_instance(driver, instance, client)
        if not locked:
            # Proceed anyway, but flag the snapshot as taken unlocked.
            snapshot_final_status = Snapshot.WARNING
        if 'MySQL' in type(driver).__name__:
            # MySQL needs the binlog position recorded for restore.
            mysql_binlog_save(client, instance)
        response = provider.take_snapshot()
        snapshot.done(response)
    except Exception as e:
        errormsg = "Error creating snapshot: {}".format(e)
        error['errormsg'] = errormsg
        set_backup_error(infra, snapshot, errormsg)
        return snapshot
    finally:
        # Always release the lock, even on the early error return.
        if locked:
            unlock_instance(driver, instance, client)
    # Measure the snapshot size; best effort (0 on failure).
    output = {}
    command = "du -sb /data/.snapshot/%s | awk '{print $1}'" % (
        snapshot.snapshot_name
    )
    try:
        exec_remote_command_host(instance.hostname, command, output)
        size = int(output['stdout'][0])
        snapshot.size = size
    except Exception as e:
        snapshot.size = 0
        LOG.error("Error exec remote command {}".format(e))
    # Optionally copy the snapshot to the database's backup path,
    # pruning previous dated directories in the background.
    backup_path = database.backup_path
    if backup_path:
        now = datetime.now()
        target_path = "{}/{}/{}/{}/{}".format(
            backup_path,
            now.strftime("%Y_%m_%d"),
            instance.hostname.hostname.split('.')[0],
            now.strftime("%Y%m%d%H%M%S"),
            infra.name
        )
        snapshot_path = "/data/.snapshot/{}/data/".format(
            snapshot.snapshot_name
        )
        output = {}
        command = """
if [ -d "{backup_path}" ]
then
    rm -rf {backup_path}/20[0-9][0-9]_[0-1][0-9]_[0-3][0-9] &
    mkdir -p {target_path}
    cp -r {snapshot_path} {target_path} &
fi
""".format(backup_path=backup_path, target_path=target_path,
           snapshot_path=snapshot_path)
        try:
            exec_remote_command_host(instance.hostname, command, output)
        except Exception as e:
            LOG.error("Error exec remote command {}".format(e))
    snapshot.status = snapshot_final_status
    snapshot.end_at = datetime.now()
    snapshot.save()
    register_backup_dbmonitor(infra, snapshot)
    return snapshot
def do(self):
    """Force-reboot the host when its hostname is still the default.

    NOTE(review): `self.is_hostname_valid` is referenced without being
    called; if it is a plain method (as the identically-named defs
    elsewhere in this file are) the reference is always truthy and the
    reboot branch never runs -- confirm it is a @property here.
    """
    if not self.is_hostname_valid:
        script = '/sbin/reboot -f > /dev/null 2>&1 &'
        exec_remote_command_host(self.host, script)
def execute_scheduled_maintenance(self, maintenance_id):
    """Run a scheduled maintenance across all of its registered hosts.

    Marks the Maintenance RUNNING, registers a TaskHistory, then for each
    HostMaintenance: builds the parameter context, renders and executes
    the main script on the host, and on failure runs the rollback script
    (when configured). Statuses and logs are persisted per host; the
    Maintenance is marked FINISHED at the end regardless of per-host
    outcomes.
    """
    LOG.debug("Maintenance id: {}".format(maintenance_id))
    maintenance = models.Maintenance.objects.get(id=maintenance_id)
    models.Maintenance.objects.filter(id=maintenance_id).update(
        status=maintenance.RUNNING, started_at=datetime.now()
    )
    LOG.info("Maintenance {} is RUNNING".format(maintenance,))
    worker_name = get_worker_name()
    task_history = TaskHistory.register(
        request=self.request, worker_name=worker_name
    )
    task_history.relevance = TaskHistory.RELEVANCE_CRITICAL
    LOG.info("id: {} | task: {} | kwargs: {} | args: {}".format(
        self.request.id, self.request.task, self.request.kwargs,
        str(self.request.args)
    ))
    task_history.update_details(
        persist=True, details="Executing Maintenance: {}".format(maintenance)
    )
    for hm in models.HostMaintenance.objects.filter(maintenance=maintenance):
        main_output = {}
        hm.status = hm.RUNNING
        hm.started_at = datetime.now()
        hm.save()
        if hm.host is None:
            # Host record disappeared; mark and skip.
            hm.status = hm.UNAVAILABLEHOST
            hm.finished_at = datetime.now()
            hm.save()
            continue
        host = hm.host
        update_task = "\nRunning Maintenance on {}".format(host)
        if maintenance.disable_alarms:
            disable_alarms(hm.host)
        # Build the template context from the configured parameter
        # functions, each resolved against this host.
        param_dict = {}
        params = models.MaintenanceParameters.objects.filter(
            maintenance=maintenance
        )
        for param in params:
            param_function = get_function(param.function_name)
            param_dict[param.parameter_name] = param_function(host.id)
        main_script = build_context_script(param_dict, maintenance.main_script)
        exit_status = exec_remote_command_host(host, main_script, main_output)
        if exit_status == 0:
            hm.status = hm.SUCCESS
        else:
            # Main script failed: attempt rollback when one is defined.
            if maintenance.rollback_script:
                rollback_output = {}
                hm.status = hm.ROLLBACK
                hm.save()
                rollback_script = build_context_script(
                    param_dict, maintenance.rollback_script
                )
                exit_status = exec_remote_command_host(
                    host, rollback_script, rollback_output
                )
                if exit_status == 0:
                    hm.status = hm.ROLLBACK_SUCCESS
                else:
                    hm.status = hm.ROLLBACK_ERROR
                hm.rollback_log = get_dict_lines(rollback_output)
            else:
                hm.status = hm.ERROR
        if maintenance.disable_alarms:
            # Re-enable alarms only if they were disabled above.
            enable_alarms(hm.host)
        update_task += "...status: {}".format(hm.status)
        task_history.update_details(persist=True, details=update_task)
        hm.main_log = get_dict_lines(main_output)
        hm.finished_at = datetime.now()
        hm.save()
    models.Maintenance.objects.filter(id=maintenance_id).update(
        status=maintenance.FINISHED, finished_at=datetime.now()
    )
    task_history.update_status_for(
        TaskHistory.STATUS_SUCCESS,
        details='Maintenance executed succesfully'
    )
    LOG.info("Maintenance: {} has FINISHED".format(maintenance))