# Example 1
    def add_replication_job(self, job_entity, src_disk_meta):
        """
        Create and start a new replication job.

        Validates that the job name is unique, assigns a new job id,
        stamps the source cluster fsid into the job entity and the source
        disk's replication info, updates both source and destination disk
        metadata, persists the job in Consul, writes a creation log entry
        and finally starts the job.

        Args:
            job_entity: the replication job entity to persist.
            src_disk_meta: metadata object of the source disk; its
                replication_info dict is expected to exist already.

        Raises:
            ReplicationException: DUPLICATE_NAME if a job with the same
                name already exists.
        """
        consul_api = ConsulAPI()
        existing_jobs = consul_api.get_replication_jobs()

        # Reject duplicate job names. Iterate values only: the job ids are
        # not needed here, and the old loop's `job_id` variable shadowed
        # the real job id assigned below.
        if any(job.job_name == job_entity.job_name
               for job in existing_jobs.values()):
            raise ReplicationException(ReplicationException.DUPLICATE_NAME,
                                       "Duplicate replication job name error.")

        manage_remote_replication = ManageRemoteReplication()
        manage_remote_replication.disk_id = job_entity.destination_disk_id
        manage_remote_replication.cluster_name = job_entity.destination_cluster_name

        job_id = consul_api.get_next_job_id()

        source_fsid = ceph_disk.get_fsid(configuration().get_cluster_name())
        job_entity.job_id = job_id
        job_entity.source_cluster_fsid = source_fsid

        src_disk_meta.replication_info["src_cluster_fsid"] = source_fsid

        mng_rep_info = ManageDiskReplicationInfo()
        src_disk_meta = mng_rep_info.set_replication_info(
            job_entity.destination_cluster_name, src_disk_meta)

        replication_info = src_disk_meta.replication_info

        # Update source and destination disks metadata.
        manage_remote_replication.update_replication_info(replication_info)
        mng_rep_info.update_replication_info(src_disk_meta, replication_info)
        system_date_time = str(datetime.datetime.now()).split('.')[0]

        # Save the job in Consul.
        consul_api.update_replication_job(job_entity)

        # Save the creation log entry in Consul.
        log_text = "{} - Job {} has been created.".format(system_date_time, job_id)
        self.log_replication_job(job_id, log_text)

        # Start the replication job.
        self.start_replication_job(job_entity)
    def build(self, job_entity, snapshot_ls, active_job_id, dest_cluster,
              sshkey_path):
        """
        Build the shell command that replicates a disk via RBD export/import.

        The pipeline exports an rbd diff (full or incremental, depending on
        how many snapshots are given), records an md5 checksum and progress
        on the source side, optionally compresses the stream, ships it over
        ssh, then decompresses, checksums and imports it on the destination.

        Args:
            job_entity: replication job entity (disks, clusters, compression).
            snapshot_ls: list of snapshot names; one entry means a full
                export of that snapshot, two entries mean an incremental
                diff from the first to the second.
            active_job_id: id used to name the md5/progress scratch files.
            dest_cluster: destination cluster entity (user_name, remote_ip).
            sshkey_path: path of the ssh private key used to reach the
                destination cluster.

        Returns:
            str: the complete shell command line.

        Raises:
            PoolException: CANNOT_GET_POOL when the source disk's pool
                cannot be resolved (e.g. pool inactive).
        """
        ceph_api = CephAPI()

        source_disk_id = job_entity.source_disk_id
        source_disk_name = 'image-' + source_disk_id
        source_pool_name = ceph_api.get_pool_bydisk(source_disk_id)

        # get_pool_bydisk returns None when the pool is inactive.
        if source_pool_name is None:
            raise PoolException(
                PoolException.CANNOT_GET_POOL,
                "Cannot get pool of disk " + str(source_disk_id))

        dest_user_name = dest_cluster.user_name
        dest_cluster_ip = dest_cluster.remote_ip

        destination_disk_id = job_entity.destination_disk_id
        destination_disk_name = 'image-' + destination_disk_id

        # Getting destination disk metadata (to get the destination disk pool).
        # Imported locally — presumably to avoid a circular import at module
        # load time; confirm before hoisting to the top of the file.
        from PetaSAN.backend.replication.manage_remote_replication import ManageRemoteReplication
        manage_remote_rep = ManageRemoteReplication()
        manage_remote_rep.cluster_name = job_entity.destination_cluster_name
        manage_remote_rep.disk_id = job_entity.destination_disk_id

        dest_meta_dict = manage_remote_rep.get_disk_meta()
        dest_disk_meta_obj = DiskMeta(dest_meta_dict)

        destination_pool_name = dest_disk_meta_obj.pool

        compression_algorithm = job_entity.compression_algorithm

        # Paths of the "md5" and "progress" scratch files for this run.
        config_api = ConfigAPI()
        md5_1_path = config_api.get_replication_md5_1_file_path(active_job_id)
        md5_2_path = config_api.get_replication_md5_2_file_path(active_job_id)

        replication_progress_comp_file_path = config_api.get_replication_progress_comp_file_path(
            active_job_id)
        replication_progress_uncomp_file_path = config_api.get_replication_progress_uncomp_file_path(
            active_job_id)
        replication_progress_import_file_path = config_api.get_replication_progress_import_file_path(
            active_job_id)

        # Path of the "pipe_reader" progress script. Reuse config_api
        # instead of building a second ConfigAPI instance.
        script_file = config_api.get_replication_pipe_reader_script_path()

        auth_string = " -n client." + dest_user_name + " --keyring=/etc/ceph/ceph.client." + dest_user_name + ".keyring"

        # Building Command :
        # ==================
        cmd = 'rbd export-diff '

        if len(snapshot_ls) == 1:
            # Full export of the single snapshot.
            cmd = cmd + source_pool_name + '/' + source_disk_name + '@' + snapshot_ls[0]
        else:
            # Incremental diff between the first two snapshots.
            cmd = (cmd + '--from-snap ' + snapshot_ls[0] + ' ' + source_pool_name
                   + '/' + source_disk_name + '@' + snapshot_ls[1])

        # Checksum + progress tracking on the uncompressed stream.
        cmd = cmd + ' - | tee >(md5sum | cut -d \' \' -f 1 > ' + md5_1_path + ') | ' + script_file + ' ' + replication_progress_uncomp_file_path + ' | '

        # compression_algorithm may be "" when compression is disabled;
        # truthiness also covers a possible None (len() would raise on it).
        if compression_algorithm:
            cmd = cmd + compression_algorithm + ' | ' + script_file + ' ' + replication_progress_comp_file_path + ' | '

        cmd = cmd + 'ssh -o StrictHostKeyChecking=no -i ' + sshkey_path + ' ' + dest_user_name + '@' + dest_cluster_ip + ' "'

        if compression_algorithm:
            cmd = cmd + compression_algorithm + ' -d | '

        cmd = cmd + 'tee >(md5sum | cut -d \' \' -f 1 > ' + md5_2_path + ') | rbd import-diff - ' + destination_pool_name + '/' + destination_disk_name + auth_string + '"' + ' 2> ' + replication_progress_import_file_path

        return cmd


# ----------------------------------------------------------------------------------------------------------------------
def edit_job(job_id):
    """
    Handle submission of the "Edit replication job" form.

    Reads the edited job fields from the request form, updates the job
    entity and asks the backend to persist the change. On success the
    user is redirected to the job list; on failure an error key is stored
    in the session and the user is redirected to the appropriate page.

    Args:
        job_id: id of the replication job being edited.

    Returns:
        A Flask redirect response.
    """
    # Local import: used only to parse the schedule literal safely below.
    import ast

    if request.method == 'GET' or request.method == 'POST':
        try:
            # NOTE(review): local_replication appears unused below; kept in
            # case ReplicationHandler() has side effects — confirm and drop.
            local_replication = ReplicationHandler()
            remote_replication = ManageRemoteReplication()
            manage_replication = ManageReplicationJobs()
            replication_job = get_selected_Job_info(job_id)
            old_node = replication_job.node_name

            job_name = request.form['job_name']
            # SECURITY: this used eval() on raw form input, which allows
            # arbitrary code execution. ast.literal_eval only accepts
            # Python literals (dicts, lists, strings, numbers, ...).
            schedule = str(request.form['schedule_object'])
            schedule_object = ast.literal_eval(schedule)
            backup_node = request.form['backup_node']
            source_cluster_name = request.form['source_cluster']
            dest_cluster_name = request.form['dest_cluster_name']
            rep_compression_mode = request.form['rep_compression_mode']

            if rep_compression_mode == 'enabled':
                replication_job.compression_algorithm = request.form[
                    'rep_compression_algorithm']
            if rep_compression_mode == 'disabled':
                replication_job.compression_algorithm = ""

            pre_snap_script = request.form['pre_snap_script']
            post_snap_script = request.form['post_snap_script']
            post_job_complete = request.form['post_job_complete']

            remote_replication.disk_id = replication_job.destination_disk_id
            remote_replication.cluster_name = dest_cluster_name
            replication_job.job_name = job_name
            replication_job.schedule = schedule_object
            replication_job.node_name = backup_node
            replication_job.source_cluster_name = source_cluster_name
            replication_job.destination_cluster_name = dest_cluster_name
            replication_job.pre_snap_url = pre_snap_script
            replication_job.post_snap_url = post_snap_script
            replication_job.post_job_complete = post_job_complete

            # Update src and dest disk meta.replication_info.
            # NOTE(review): manage_destination_cluster appears unused; kept
            # in case the constructor has side effects — confirm and drop.
            manage_destination_cluster = ManageDestinationCluster()
            manage_replication.edit_replication_job(replication_job, old_node)
            session['success'] = "ui_admin_edit_job_success"
            return redirect(url_for('replication_controller.job_list'))

        except ReplicationException as e:
            logger.error(e)
            if e.id == ReplicationException.CONNECTION_REFUSED:
                session['err'] = "ui_admin_add_job_connction_refused_err"
            elif e.id == ReplicationException.CONNECTION_TIMEOUT:
                session['err'] = "ui_admin_add_job_connction_timeout_err"
            elif e.id == ReplicationException.DUPLICATE_NAME:
                session['err'] = "ui_admin_add_job_duplicate_err"
            elif e.id == ReplicationException.PERMISSION_DENIED:
                session['err'] = "Permission Denied."
            else:
                # Previously an unrecognized id fell through and the view
                # returned None; treat it as the general error case.
                session['err'] = "core_consul_exception_cant_edit_rep_job"
            return redirect(url_for('replication_controller.add_job'))

        except Exception as e:
            logger.error(e)
            session['err'] = "ui_admin_edit_job_error"
            return redirect(url_for('replication_controller.view_job'))
def save_job():
    """
    Handle submission of the "Add replication job" form.

    Validates the source and destination disks (same size, neither already
    replication enabled), fills a new ReplicationJob entity, seeds the
    source disk's replication_info and hands everything to the backend to
    be saved in Consul. On success the user is redirected to the job list
    page; on failure an error key is stored in the session and the user is
    redirected back to the add-job page.

    Returns:
        A Flask redirect response.
    """
    if request.method == 'GET' or request.method == 'POST':
        try:
            replication_job = ReplicationJob()
            # NOTE(review): local_replication appears unused below; kept in
            # case ReplicationHandler() has side effects — confirm and drop.
            local_replication = ReplicationHandler()
            remote_replication = ManageRemoteReplication()
            manage_replication = ManageReplicationJobs()

            # Read the replication job fields from the html form.
            job_name = request.form['job_name']
            schedule_object = json.loads(request.form['schedule_object'])
            backup_node = request.form['backup_node']
            source_disk = request.form['source_disk']
            source_cluster_name = request.form['source_cluster']
            dest_cluster_name = request.form['dest_cluster_name']
            destination_disk = request.form['destination_disk']
            rep_compression_mode = request.form['rep_compression_mode']

            if rep_compression_mode == 'enabled':
                replication_job.compression_algorithm = request.form[
                    'rep_compression_algorithm']

            pre_snap_script = request.form['pre_snap_script']
            post_snap_script = request.form['post_snap_script']
            post_job_complete = request.form['post_job_complete']

            src_disk_meta = manage_replication.get_src_disk_meta(source_disk)
            remote_replication.disk_id = destination_disk
            remote_replication.cluster_name = dest_cluster_name
            dest_disk_meta_dict = remote_replication.get_disk_meta()
            dest_disk_meta_obj = DiskMeta(dest_disk_meta_dict)

            # Source and destination disks must have the same size.
            if src_disk_meta.size != dest_disk_meta_obj.size:
                session[
                    'err'] = "ui_admin_add_job_error_scr_dest_size_mismatch"
                return redirect(url_for('replication_controller.add_job'))
            # Neither disk may already be part of a replication setup.
            if src_disk_meta.replication_info or dest_disk_meta_obj.replication_info:
                session[
                    'err'] = "ui_admin_add_job_error_scr_dest_have_replication_info"
                return redirect(url_for('replication_controller.add_job'))

            # Fill the replication job entity.
            replication_job.job_name = job_name
            replication_job.schedule = schedule_object
            replication_job.node_name = backup_node
            replication_job.source_cluster_name = source_cluster_name
            replication_job.source_disk_id = source_disk
            replication_job.destination_disk_id = destination_disk
            replication_job.destination_cluster_name = dest_cluster_name
            replication_job.pre_snap_url = pre_snap_script
            replication_job.post_snap_url = post_snap_script
            replication_job.post_job_complete = post_job_complete

            # Seed src disk meta.replication_info; fsid/ip fields are left
            # empty here and filled in by the backend.
            replication_info = {
                'src_disk_id': replication_job.source_disk_id,
                'src_disk_name': src_disk_meta.disk_name,
                'src_cluster_fsid': "",
                'src_cluster_name': replication_job.source_cluster_name,
                'dest_disk_id': replication_job.destination_disk_id,
                'dest_disk_name': dest_disk_meta_obj.disk_name,
                'dest_cluster_name': replication_job.destination_cluster_name,
                'dest_cluster_ip': "",
                'dest_cluster_fsid': ""
            }
            src_disk_meta.replication_info = replication_info

            # Send the job entity to the backend to be saved in Consul.
            manage_replication.add_replication_job(replication_job,
                                                   src_disk_meta)
            session['success'] = "ui_admin_add_job_success"
            return redirect(url_for('replication_controller.job_list'))

        except ReplicationException as e:
            logger.error(e)
            if e.id == ReplicationException.CONNECTION_REFUSED:
                session['err'] = "ui_admin_add_job_connction_refused_err"
            elif e.id == ReplicationException.CONNECTION_TIMEOUT:
                session['err'] = "ui_admin_add_job_connction_timeout_err"
            elif e.id == ReplicationException.DUPLICATE_NAME:
                session['err'] = "ui_admin_add_job_duplicate_err"
            elif e.id == ReplicationException.PERMISSION_DENIED:
                session['err'] = "Permission Denied."
            else:
                # Previously an unrecognized id fell through and the view
                # returned None; treat it as the general error case.
                session['err'] = "core_consul_exception_cant_add_rep_job"
            return redirect(url_for('replication_controller.add_job'))

        except Exception as e:
            logger.error(e)
            session['err'] = "ui_admin_add_job_error"
            return redirect(url_for('replication_controller.add_job'))
# Example 5
    def start_replication_job(self, job_entity):
        """
        Mark a replication job as "started" and persist it in Consul.

        Fetches the destination disk metadata to verify the disk exists
        and is replication enabled. If so, the job status is set to
        "started", the job is saved in Consul, the node crontab is rebuilt
        and a log entry is written. All failures are logged without
        raising.

        Args:
            job_entity: the replication job entity to start.
        """
        # Set data of the remote (destination) cluster.
        manage_remote_rep = ManageRemoteReplication()
        manage_remote_rep.cluster_name = job_entity.destination_cluster_name
        manage_remote_rep.disk_id = job_entity.destination_disk_id

        # FIRST: get destination disk metadata (checks the disk exists).
        try:
            dest_meta_dict = manage_remote_rep.get_disk_meta()
            dest_disk_meta_obj = DiskMeta(dest_meta_dict)

            if dest_meta_dict:
                # A non-empty "replication_info" means the destination disk
                # is replication enabled.
                dest_replication_info = dest_disk_meta_obj.replication_info

                if dest_replication_info:
                    # Change the job status to "started".
                    job_entity.status = "started"

                    # Save new job in Consul or update a stopped job.
                    consul_api = ConsulAPI()
                    consul_api.update_replication_job(job_entity)

                    # Call script to rebuild the crontab.
                    self.start_node_service()

                    # Log: "<time> - Job <id> has been started."
                    system_date_time = str(datetime.datetime.now()).split('.')[0]
                    job_id = job_entity.job_id
                    text = "{} - Job {} has been started.".format(system_date_time, job_id)

                    # Save the log entry in Consul.
                    self.log_replication_job(job_id, text)

                else:
                    # Destination disk is not replication enabled.
                    system_date_time = str(datetime.datetime.now()).split('.')[0]
                    job_id = job_entity.job_id
                    text = "{} - Job {} has been failed to start , destination disk doesn't have replication info.".format(
                        system_date_time, job_id)
                    self.log_replication_job(job_id, text)

        except ReplicationException as e:
            # getattr: project exceptions expose .message, but fall back to
            # str(e) so composing the log line itself cannot raise.
            err = str(getattr(e, 'message', e))
            if e.id == ReplicationException.CONNECTION_TIMEOUT:
                logger.error("Job {} | Cannot access destination disk | Connection Timed Out , {}".format(str(job_entity.job_id), err))

            elif e.id == ReplicationException.CONNECTION_REFUSED:
                logger.error("Job {} | Cannot access destination disk | Connection Refused , {}".format(str(job_entity.job_id), err))

            elif e.id == ReplicationException.PERMISSION_DENIED:
                logger.error("Job {} | Cannot access destination disk | Permission Denied , {}".format(str(job_entity.job_id), err))

            elif e.id == ReplicationException.GENERAL_EXCEPTION:
                logger.error("Job {} | Cannot access destination disk | {}".format(str(job_entity.job_id), err))

        except PoolException as e:
            if e.id == PoolException.CANNOT_GET_POOL:
                logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except DiskListException as e:
            if e.id == DiskListException.DISK_NOT_FOUND:
                logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except CephException as e:
            if e.id == CephException.GENERAL_EXCEPTION:
                logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except MetadataException as e:
            logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except Exception as e:
            # Generic exceptions have no .message attribute on Python 3 —
            # the old str(e.message) raised AttributeError right here.
            logger.error("Job {} | Cannot access destination disk | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))
# Example 6
    def delete_replication_job(self, job_entity):
        """
        Delete a replication job and clean up both disks.

        Clears the replication info and snapshots from the destination
        disk, then from the source disk (both best effort — failures are
        only logged), removes the job and its log from Consul and rebuilds
        the node crontab.

        Args:
            job_entity: the replication job entity to delete.
        """
        # Set data of the remote (destination) cluster.
        manage_remote_rep = ManageRemoteReplication()
        manage_remote_rep.cluster_name = job_entity.destination_cluster_name
        manage_remote_rep.disk_id = job_entity.destination_disk_id

        # FIRST: get destination disk metadata (checks the disk exists).
        try:
            dest_disk_meta = manage_remote_rep.get_disk_meta()

            if dest_disk_meta:
                # Clear "replication_info" from the destination disk.
                manage_remote_rep.delete_replication_info()

                # Delete all destination disk snapshots.
                manage_remote_rep.delete_dest_snapshots()

        except ReplicationException as e:
            # getattr: project exceptions expose .message, but fall back to
            # str(e) so composing the log line itself cannot raise.
            err = str(getattr(e, 'message', e))
            if e.id == ReplicationException.CONNECTION_TIMEOUT:
                logger.error("Job {} | Cannot access destination disk | Connection Timed Out , {}".format(str(job_entity.job_id), err))

            elif e.id == ReplicationException.CONNECTION_REFUSED:
                logger.error("Job {} | Cannot access destination disk | Connection Refused , {}".format(str(job_entity.job_id), err))

            elif e.id == ReplicationException.PERMISSION_DENIED:
                logger.error("Job {} | Cannot access destination disk | Permission Denied , {}".format(str(job_entity.job_id), err))

            elif e.id == ReplicationException.GENERAL_EXCEPTION:
                logger.error("Job {} | Cannot access destination disk | {}".format(str(job_entity.job_id), err))

        except PoolException as e:
            if e.id == PoolException.CANNOT_GET_POOL:
                logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except DiskListException as e:
            if e.id == DiskListException.DISK_NOT_FOUND:
                logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except CephException as e:
            if e.id == CephException.GENERAL_EXCEPTION:
                logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except MetadataException as e:
            logger.error("Job {} | Destination disk not found | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        except Exception as e:
            # Generic exceptions have no .message attribute on Python 3 —
            # the old str(e.message) raised AttributeError right here.
            logger.error("Job {} | Cannot access destination disk | {}".format(str(job_entity.job_id), str(getattr(e, 'message', e))))

        # SECOND: get source disk metadata (checks the disk exists).
        src_disk_id = job_entity.source_disk_id

        try:
            src_disk_meta = self.get_src_disk_meta(src_disk_id)

            if src_disk_meta:
                # Clear "replication_info" from the source disk.
                mng_rep_info = ManageDiskReplicationInfo()
                mng_rep_info.delete_replication_info(src_disk_meta)

                # Delete all source disk snapshots.
                ceph_api = CephAPI()
                pool_name = ceph_api.get_pool_bydisk(job_entity.source_disk_id)

                # get_pool_bydisk returns None when the pool is inactive.
                if pool_name is None:
                    logger.error("Job {} | Cannot get pool of disk {}".format(str(job_entity.job_id), str(job_entity.source_disk_id)))
                else:
                    self.delete_snapshots(pool_name, job_entity.source_disk_id)

            else:
                logger.error("Job {} | Cannot access source disk ...".format(str(job_entity.job_id)))

        except Exception as e:
            # Add a job-log entry; getattr avoids AttributeError on
            # exceptions without a .message attribute (Python 3).
            system_date_time = str(datetime.datetime.now()).split('.')[0]
            text = "{} - Job {} | Cannot access source disk. {}".format(system_date_time, str(job_entity.job_id), str(getattr(e, 'message', e)))
            self.log_replication_job(job_entity.job_id, text)

        # Delete the replication job and its log from Consul.
        consul_api = ConsulAPI()
        consul_api.delete_replication_job(job_entity)
        consul_api.delete_replication_log(job_entity)

        # Node log: the job has been deleted.
        logger.info("The replication job {} has been deleted.".format(job_entity.job_id))

        # Call script to rebuild the crontab.
        self.start_node_service()
# Example 7
    def stop_replication_job(self, job_entity):
        """
        Stop a replication job.

        Deletes the replication snapshots from the destination disk and
        then from the source disk (both best effort — failures are written
        to the job log), marks the job "stopped" in Consul, rebuilds the
        node crontab and logs the stop.

        Args:
            job_entity: the replication job entity to stop.
        """
        job_id = job_entity.job_id

        def _timestamp():
            # System date/time without the microseconds part.
            return str(datetime.datetime.now()).split('.')[0]

        # Set data of the remote (destination) cluster.
        manage_remote_rep = ManageRemoteReplication()
        manage_remote_rep.cluster_name = job_entity.destination_cluster_name
        manage_remote_rep.disk_id = job_entity.destination_disk_id

        # FIRST: get destination disk metadata (checks the disk exists).
        try:
            dest_disk_meta = manage_remote_rep.get_disk_meta()

            if dest_disk_meta:
                # Delete all destination disk snapshots.
                manage_remote_rep.delete_dest_snapshots()

        except ReplicationException as e:
            system_date_time = _timestamp()
            # getattr: project exceptions expose .message, but fall back to
            # str(e) so composing the log line itself cannot raise.
            err = str(getattr(e, 'message', e))
            text = "{} - Job {} | Cannot access destination disk.".format(system_date_time, job_id)

            if e.id == ReplicationException.CONNECTION_TIMEOUT:
                text = "{} - Job {} | Cannot access destination disk | Connection Timed Out , {}".format(system_date_time, job_id, err)

            elif e.id == ReplicationException.CONNECTION_REFUSED:
                text = "{} - Job {} | Cannot access destination disk | Connection Refused , {}".format(system_date_time, job_id, err)

            elif e.id == ReplicationException.PERMISSION_DENIED:
                text = "{} - Job {} | Cannot access destination disk | Permission Denied , {}".format(system_date_time, job_id, err)

            elif e.id == ReplicationException.GENERAL_EXCEPTION:
                text = "{} - Job {} | Cannot access destination disk | {}".format(system_date_time, job_id, err)

            self.log_replication_job(job_id, text)

        except PoolException as e:
            if e.id == PoolException.CANNOT_GET_POOL:
                text = "{} - Job {} | Destination disk not found | {}".format(_timestamp(), job_id, str(getattr(e, 'message', e)))
                self.log_replication_job(job_id, text)

        except DiskListException as e:
            if e.id == DiskListException.DISK_NOT_FOUND:
                text = "{} - Job {} | Destination disk not found | {}".format(_timestamp(), job_id, str(getattr(e, 'message', e)))
                self.log_replication_job(job_id, text)

        except CephException as e:
            if e.id == CephException.GENERAL_EXCEPTION:
                text = "{} - Job {} | Destination disk not found | {}".format(_timestamp(), job_id, str(getattr(e, 'message', e)))
                self.log_replication_job(job_id, text)

        except MetadataException as e:
            text = "{} - Job {} | Destination disk not found | {}".format(_timestamp(), job_id, str(getattr(e, 'message', e)))
            self.log_replication_job(job_id, text)

        except Exception as e:
            # Generic exceptions have no .message attribute on Python 3 —
            # the old str(e.message) raised AttributeError right here.
            text = "{} - Job {} | Cannot access destination disk | General Exception , {}".format(_timestamp(), job_id, str(getattr(e, 'message', e)))
            self.log_replication_job(job_id, text)

        # SECOND: get source disk metadata (checks the disk exists).
        src_disk_id = job_entity.source_disk_id

        try:
            src_disk_meta = self.get_src_disk_meta(src_disk_id)

            if src_disk_meta:
                # Delete all source disk snapshots.
                ceph_api = CephAPI()
                pool_name = ceph_api.get_pool_bydisk(job_entity.source_disk_id)

                # get_pool_bydisk returns None when the pool is inactive.
                if pool_name is None:
                    logger.error("Job {} | Cannot get pool of disk {}".format(str(job_entity.job_id), str(job_entity.source_disk_id)))
                else:
                    self.delete_snapshots(pool_name, job_entity.source_disk_id)

            else:
                # Source disk metadata unavailable: add a job-log entry.
                text = "{} - Job {} | Cannot access source disk.".format(_timestamp(), job_id)
                self.log_replication_job(job_id, text)

        except Exception as e:
            # Add a job-log entry; getattr avoids AttributeError on
            # exceptions without a .message attribute (Python 3).
            text = "{} - Job {} | Cannot access source disk. {}".format(_timestamp(), job_id, str(getattr(e, 'message', e)))
            self.log_replication_job(job_id, text)

        # Mark the job "stopped" and persist it in Consul.
        consul_api = ConsulAPI()
        job_entity.status = "stopped"
        consul_api.update_replication_job(job_entity)

        # Call script to rebuild the crontab.
        self.start_node_service()

        # Log: "<time> - Job <id> has been stopped."
        text = "{} - Job {} has been stopped.".format(_timestamp(), job_id)

        # Save the log entry in Consul.
        self.log_replication_job(job_id, text)