Example #1
0
def get_ec_profiles():
    """Render the EC-profiles list page.

    Fetches all erasure-coded profiles and, if a one-shot flash message
    ("err"/"success"/"warning") is stored in the session, pops it and
    passes it to the template under the same keyword.
    """
    if request.method == 'GET' or request.method == 'POST':
        try:
            manageProfiles = ManageEcProfiles()
            profiles_list = manageProfiles.get_ec_profiles()
            # One-shot flash messages: pop the value, then render once.
            # (Replaces three near-identical if/elif branches.)
            for flag, key in ((list_err, 'err'),
                              (list_success, 'success'),
                              (list_warning, 'warning')):
                if flag in session:
                    result = session.pop(key)
                    return render_template('admin/ec_profile/profiles_list.html',
                                           profiles_list=profiles_list,
                                           **{key: result})
            return render_template('admin/ec_profile/profiles_list.html',
                                   profiles_list=profiles_list)

        except CephException as e:
            # Map Ceph failures to a translatable UI message key.
            if e.id == CephException.CONNECTION_TIMEOUT:
                result = session['err'] = "ui_admin_ceph_time_out"
            else:
                # GENERAL_EXCEPTION and any unknown id share one message;
                # only truly unexpected ids are logged (as in the original).
                result = session['err'] = "ui_admin_ceph_general_exception"
                if e.id != CephException.GENERAL_EXCEPTION:
                    logger.error(e)
            return render_template('admin/ec_profile/profiles_list.html', err=result)
 def run(self):
     """Notify about failed replication jobs and purge them from Consul.

     Collects every failed replication job, appends a notification
     Result to the plugin context, and deletes each failed job entry.
     Any failure is logged and swallowed (the plugin must not crash the
     notification loop).
     """
     try:
         status = False
         consul = ConsulAPI()
         failed_jobs = consul.get_replication_failed_jobs()
         if len(failed_jobs) > 0:
             # .items() works on both Python 2 and 3 (iteritems is
             # Python-2-only); collect parts and join instead of += in a loop.
             parts = []
             for job_id, job_info in failed_jobs.items():
                 parts.append("\n job id: " + job_id + " job name: " +
                              job_info.job_name)
                 status = consul.delete_failed_job(job_id)
             failed_jobs_str = "".join(parts)
             result = Result()
             result.plugin_name = self.get_plugin_name()
             result.title = gettext("core_message_notify_failed_jobs_title")
             # The translated body contains literal "\n" sequences; turn
             # them into real newlines before substituting the job list.
             result.message = '\n'.join(
                 gettext("core_message_notify_failed_jobs_body").split(
                     "\\n")).format(failed_jobs_str)
             self.__context.results.append(result)
             logger.info(result.message)
             logger.info("status of deleting failed jobs from consul is " +
                         str(status))
     except Exception as e:
         logger.exception(e)
         logger.error(
             "An error occurred while ReplicationNotificationPlugin was running."
         )
    def start(self, disk_id, pool):
        """Register disk *disk_id* (in *pool*) and its paths in Consul.

        Reads the PetaSAN metadata stored on the rbd image; returns
        ``Status.error`` when the metadata is missing or on any failure,
        ``Status.done`` on success.
        """
        try:
            ceph_api = CephAPI()
            consul_api = ConsulAPI()

            attr = ceph_api.read_image_metadata(
                ConfigAPI().get_image_name_prefix() + disk_id, pool)
            petasan_meta = attr.get(ConfigAPI().get_image_meta_key())
            disk_meta = DiskMeta()
            if petasan_meta:
                disk_meta.load_json(petasan_meta)
            else:
                return Status.error

            consul_api.add_disk_resource(disk_meta.id, "disk")
            consul_api.add_disk_pool(disk_meta.id, pool)
            # Consul path resources are 1-based: /<disk_id>/1, /<disk_id>/2, ...
            for i, _path in enumerate(disk_meta.paths, start=1):
                consul_api.add_disk_resource(
                    "/".join(["", disk_meta.id, str(i)]), None)

        except Exception as e:
            logger.error("Can not start disk %s" % disk_id)
            # str(e) instead of e.message: .message is Python-2-only and
            # absent on many exception types.
            logger.exception(str(e))
            return Status.error
        return Status.done
Example #4
0
    def save_current_tunings(self, ceph, lio, post_script, storage_engine):
        """Persist the current tuning files and the storage engine choice.

        Writes the ceph/lio/post-deploy tuning contents under the current
        tunings directory (created if missing) and stores
        *storage_engine* in the cluster info; a failure to update the
        cluster info is logged but not raised.
        """
        config_api = ConfigAPI()
        path = config_api.get_current_tunings_path()

        if not os.path.exists(path):
            os.makedirs(path)
        with open(path + config_api.get_ceph_tunings_file_name(), 'w') as f:
            f.write(ceph)

        with open(path + config_api.get_lio_tunings_file_name(), 'w') as f:
            f.write(lio)

        with open(path + config_api.get_post_deploy_script_file_name(), 'w') as f:
            f.write(post_script)
        logger.info("Current tuning configurations saved.")

        # Save "storage_engine" in Cluster info #
        # ------------------------------------- #
        try:
            ci = self.get_cluster_info()
            ci.storage_engine = storage_engine
            self.set_cluster_network_info(ci)

        except Exception as ex:
            # str(ex) instead of ex.message: .message is Python-2-only.
            logger.error("Cannot add storage engine to cluster info , {}".format(str(ex)))
Example #5
0
    def get_auth_pools(self, user_name):
        """Return the pool names the Ceph client *user_name* may access.

        Runs ``ceph auth get`` and parses the client's osd caps string.

        :raises CephException: CONNECTION_TIMEOUT on connection failure,
            GENERAL_EXCEPTION on any other command failure.
        """
        config = configuration()
        cluster_name = config.get_cluster_name()
        cmd = "ceph auth get client.{} --format json --cluster {}".format(
            user_name, cluster_name)
        ret, stdout, stderr = exec_command_ex(cmd)

        if ret != 0:
            if stderr:
                if 'Connection timed out' in stderr or 'error connecting' in stderr:
                    logger.error('Error in Ceph Connection cmd:' + cmd)
                    raise CephException(CephException.CONNECTION_TIMEOUT,
                                        'ConnectionTimeError')

            logger.error('General error in Ceph cmd:' + cmd)
            raise CephException(CephException.GENERAL_EXCEPTION,
                                'GeneralCephException')

        user_info = json.loads(stdout)
        pools_info = user_info[0]['caps']['osd']
        auth_pools = []
        if 'profile rbd pool=' in pools_info:
            # str.split(',') already yields a one-element list when no comma
            # is present, so the original single-pool branch was redundant.
            auth_pools = pools_info.replace('profile rbd pool=', "").split(',')

        return auth_pools
Example #6
0
    def _get_new_plugins_instances(self, modules):
        """Import each dotted module path in *modules* and instantiate the
        plugin classes it defines.

        A plugin class is an ABCMeta-typed attribute whose repr contains
        "Plugin" but not "Base".  Modules that fail to import or
        instantiate are logged and skipped.
        """
        plugins = []
        for cls in modules:
            try:
                # Import the leaf module of the dotted path.
                mod_obj = __import__(cls)
                for part in str(cls).split(".")[1:]:
                    mod_obj = getattr(mod_obj, part)
                # Find all plugins in the module and create instances.
                for mod_prop in dir(mod_obj):
                    # Ignore private attributes.
                    if str(mod_prop).startswith("__"):
                        continue
                    attr = getattr(mod_obj, mod_prop)
                    attr_str = str(attr)
                    attr_type_str = str(type(attr))
                    # BUGFIX: the original tested `attr_str.find("Plugin")`
                    # as a boolean -- find() returns -1 (truthy) when the
                    # substring is absent, so the name filter never worked.
                    # It also appended only once per module, outside the
                    # loop, so only the last match was collected (and an
                    # unbound `instance` raised when nothing matched).
                    if (attr_type_str.find("ABCMeta") > -1
                            and attr_str.find("Base") == -1
                            and attr_str.find("Plugin") > -1):
                        plugins.append(attr(self.__context))
            except Exception:
                logger.error("Error load plugin {}.".format(cls))
        return plugins
    def do_connect(self):
        """Open a rados connection to the Ceph cluster.

        Returns the connected ``rados.Rados`` handle, or -1 on failure
        (callers test for the -1 sentinel).
        """
        cluster = None
        try:
            conf_api = ConfigAPI()

            # Get which ceph user is using this function #
            # ========================================== #
            users = Users()
            user_name = users.get_current_system_user().strip()
            if user_name == "root":
                user_name = "admin"

            # Get ceph user's keyring file path #
            # ================================= #
            ceph_auth = CephAuthenticator()

            cluster_name = configuration().get_cluster_name()

            cluster = rados.Rados(conffile=conf_api.get_ceph_conf_path(cluster_name),
                                  conf=dict(keyring=ceph_auth.get_keyring_path()), rados_id=user_name)
            cluster.connect()

            return cluster

        except Exception as e:
            logger.error("do_connect() Cannot connect to ceph cluster.")
            # str(e) instead of e.message: .message is Python-2-only.
            logger.exception(str(e))

            # BUGFIX: `cluster` may never have been assigned when the
            # failure happened before rados.Rados(); guard before shutdown.
            if cluster is not None:
                try:
                    cluster.shutdown()
                except Exception:
                    pass

            return -1
def save_user():
    """Create a new user from the POSTed form and redirect by outcome."""
    if request.method == 'POST':
        try:
            user = User()
            user.name = request.form['name']
            user.user_name = request.form['userName']
            user.role_id = int(request.form['role'])
            user.password = request.form['userPassword']
            user.email = str(request.form['email'])
            # The checkbox is absent from the form when unchecked; .get()
            # replaces the original try/except around a plain KeyError.
            # NOTE: "notfiy" (sic) matches the existing User model attribute.
            user.notfiy = request.form.get('notify_mail') == 'Notify'
            manage_user = ManageUser()
            status = manage_user.add_user(user)
            if status == ManageUserStatus.done:
                session['success'] = "ui_admin_add_user_suc"
                return redirect(url_for('user_controller.user_list'))
            elif status == ManageUserStatus.exists:
                session['err'] = "ui_admin_add_user_err_exists"
                return redirect(url_for('user_controller.add_user'), 307)
            elif status == ManageUserStatus.error:
                session['err'] = "ui_admin_add_user_err"
                return redirect(url_for('user_controller.add_user'), 307)
        except Exception as e:
            session['err'] = "ui_admin_add_user_err_exception"
            logger.error(e)
            return redirect(url_for('user_controller.add_user'), 307)
def edit_dest_cluster(cluster_name):
    """Update a replication destination cluster from the submitted form."""
    if request.method == 'GET' or request.method == 'POST':
        try:
            # Get the selected destination cluster.
            manage_dest_cluster = ManageDestinationCluster()
            selected_dest_cluster = manage_dest_cluster.get_replication_dest_cluster(cluster_name)

            # Fill the entity from the html form.
            selected_dest_cluster.remote_ip = request.form['dest_cluster_ip']
            selected_dest_cluster.ssh_private_key = str(request.form['key'])
            selected_dest_cluster.user_name = request.form['userName']

            # Send the entity to the backend to be saved in consul.
            # (Reuse the manager above; the original built a second one and
            # declared three never-used message locals.)
            manage_dest_cluster.edit_destination_cluster(selected_dest_cluster)
            session['success'] = "ui_admin_edit_dest_cluster_success"
            return redirect(url_for('destination_cluster_controller.dest_clusters_list'))

        except Exception as e:
            logger.error(e)
            session['err'] = "ui_admin_edit_dest_cluster_error"
            return redirect(url_for('destination_cluster_controller.view_dest_cluster', cluster_name=cluster_name))
def test_active_clean_old():
    """Poll ``ceph pg stat`` until all PGs are active+clean (max 5 tries).

    Sleeps a back-off between attempts (popped from the tail of the
    sleeps list: 40, 30, 25, ...); once clean, initializes the default
    rbd pool (required since Nautilus).
    """
    cluster_name = configuration().get_cluster_name()
    sleeps = [10, 15, 20, 25, 30, 40]
    tries = 5

    while tries:
        status = False
        try:
            out, err = exec_command(
                "ceph --cluster {} -f json pg stat".format(cluster_name))
            ceph_pg_stat = str(out).replace("'", '')
            ceph_pg_stat = json.loads(ceph_pg_stat)
            logger.info("Ceph status is " +
                        ceph_pg_stat['num_pg_by_state'][0]['name'])

            status = str(ceph_pg_stat['num_pg_by_state'][0]
                         ['name']) == 'active+clean'
        except Exception as e:
            # str(e) instead of e.message: .message is Python-2-only.
            logger.error("Get ceph status returned error.\n" + str(e))

        if not status:
            tries -= 1
            sleep_seconds = sleeps.pop()
            logger.warning(
                'waiting %s seconds before retrying to check active+clean status',
                sleep_seconds)
            time.sleep(sleep_seconds)
        else:
            # Nautilius call pool init when active :
            call_cmd('rbd pool init rbd')
            break
Example #11
0
    def __sync_cluster_config_file(self):
        """Copy the cluster info file to every other management node.

        Returns True on success; on any failure marks the status report
        unsuccessful, records the failed-task key, and returns False.
        """
        try:
            manage_conf = configuration()
            current_node_name = manage_conf.get_node_info().name
            cluster_info = manage_conf.get_cluster_info()
            config_api = ConfigAPI()

            for i in cluster_info.management_nodes:
                node_info = NodeInfo()
                node_info.load_json(json.dumps(i))

                # Skip the node we are running on.
                if node_info.name != current_node_name:
                    ssh_obj = ssh()
                    if not ssh_obj.copy_file_to_host(
                            node_info.management_ip,
                            config_api.get_cluster_info_file_path()):
                        logger.error(
                            "Could not copy configuration file to {} server.".
                            format(node_info.name))
                        self.__status_report.success = False
                        self.__status_report.failed_tasks.append(
                            "core_cluster_deploy_couldnt_sync_config_file")
                        return False

        except Exception as ex:
            # str(ex) instead of ex.message: .message is Python-2-only.
            logger.exception(str(ex))
            self.__status_report.success = False
            self.__status_report.failed_tasks.append(
                "core_cluster_deploy_couldnt_sync_config_file")
            return False

        return True
def create_osds_remote(remote_mons_ips_ls):
    """Run the create-osd script on each remote monitor node.

    Aggregates per-node failed tasks into one StatusReport and stops at
    the first node whose report is unsuccessful.
    """
    config_api = ConfigAPI()
    remote_status = StatusReport()
    for node_ip in remote_mons_ips_ls:
        node_report = StatusReport()

        out, err = ssh().exec_command(
            node_ip,
            " python {} ".format(config_api.get_node_create_osd_script_path()))

        logger.info(" ".join([node_ip, out]))

        # The remote script embeds its JSON report after a "/report/"
        # marker; anything else is recorded as an error payload.
        if "/report/" in out:  # To avoid -- IndexError: list index out of range
            node_report.load_json(str(out.split("/report/")[1]))
        elif err:
            node_report.load_json("Status Report Error , error : {}".format(
                str(err)))
        else:
            node_report.load_json("Connection Error.")

        remote_status.failed_tasks.extend(node_report.failed_tasks)

        if not node_report.success:
            logger.error(
                "Cannot create osd for remote node {}".format(node_ip))
            remote_status.success = False
            return remote_status

    return remote_status
    def run(self):
        '''
        Executed when the start method of a PoolCheckerThread object is
        called: queries pg ls-by-pool for self.pool and records the
        number of active PGs and active OSDs on the instance.
        '''
        # Get which ceph user is using this function & get his keyring file path #
        # ====================================================================== #
        ceph_auth = CephAuthenticator()
        cmd = 'ceph pg ls-by-pool {} --format json-pretty {} --cluster {}'.format(
            self.pool, ceph_auth.get_authentication_string(),
            self.cluster_name)
        ret, stdout, stderr = exec_command_ex(cmd)

        if ret != 0:
            connection_failed = stderr and ('Connection timed out' in stderr
                                            or 'error connecting' in stderr)
            if connection_failed:
                logger.error('Error in Ceph Connection cmd:' + cmd)
                raise CephException(CephException.CONNECTION_TIMEOUT,
                                    'ConnectionTimeError')
            logger.error('General error in Ceph cmd:' + cmd)
            raise CephException(CephException.GENERAL_EXCEPTION,
                                'GeneralCephException')

        parser = PGDumpParser()
        parser.parse(stdout)

        self.active_pgs_num = parser.active_pgs
        self.active_osds_num = parser.active_osds
Example #14
0
def remove_profile(profile_name):
    """Delete an EC profile and redirect to the profiles list.

    Stores a one-shot session message describing success or the specific
    failure (profile in use, Ceph timeout, general error).
    """
    if request.method == 'GET' or request.method == 'POST':
        try:
            manageProfiles = ManageEcProfiles()
            manageProfiles.delete_ec_profile(profile_name)
            session['success'] = "ui_admin_delete_ecProfile_success"

        except ECProfileException as e:
            if e.id == ECProfileException.ECPROFILE_IN_USE:
                session['err'] = "ui_admin_delete_ecProfile_fail_in_use"
            else:
                # BUGFIX: the original returned None (no redirect at all)
                # for any other ECProfileException id.
                session['err'] = "ui_admin_delete_ecProfile_error"

        except CephException as e:
            if e.id == CephException.CONNECTION_TIMEOUT:
                session['err'] = "ui_admin_ceph_time_out"
            else:
                # GENERAL_EXCEPTION and any unknown id share one message.
                session['err'] = "ui_admin_ceph_general_exception"

        except Exception as e:
            session['err'] = "ui_admin_delete_ecProfile_error"
            logger.error(e)

        # Every outcome lands back on the profiles list.
        return redirect(url_for('ec_profile_controller.get_ec_profiles'))
Example #15
0
    def auto(self, type=1):
        """Start an automatic path reassignment with the plugin whose id
        matches *type*.

        Raises when a reassignment is already in progress.  (The
        parameter name ``type`` shadows the builtin but is kept for
        caller compatibility.)
        """
        logger.info("User start auto reassignment paths.")
        assignments_stats = self.get_assignments_stats()
        if assignments_stats.is_reassign_busy:
            logger.error("There is already reassignment running.")
            raise Exception("There is already reassignment running.")

        ConsulAPI().drop_all_node_sessions(
            self.__app_conf.get_consul_assignment_path(),
            configuration().get_node_name())
        sleep(3)

        # Keep only unassigned paths (status == -1) that name a node.
        candidate_paths = []
        for path in assignments_stats.paths:
            if len(path.node.strip()) > 0 and path.status == -1:
                candidate_paths.append(path)
        assignments_stats.paths = candidate_paths

        self.__context.paths = assignments_stats.paths
        self.__context.nodes = assignments_stats.nodes
        for plugin in self._get_new_plugins_instances(auto_plugins):
            if plugin.is_enable() and plugin.get_plugin_id() == type:
                paths_assignments = plugin.get_new_assignments()
                if len(paths_assignments) == 0:
                    logger.info("There is no node under average.")
                    return
                self.set_new_assignments(paths_assignments)
                break
        self.run()
def add_dest_cluster():
    """Render the add-destination-cluster page, surfacing any one-shot
    session message ("err"/"success"/"warning")."""
    if request.method == 'GET' or request.method == 'POST':
        template = '/admin/replication/destination_clusters/add_dest_cluster.html'
        try:
            # One-shot flash messages: pop and pass under the same keyword.
            # (Replaces three near-identical if/elif branches.)
            for flag, key in ((list_err, 'err'),
                              (list_success, 'success'),
                              (list_warning, 'warning')):
                if flag in session:
                    return render_template(template, **{key: session.pop(key)})
            return render_template(template)

        except Exception as e:
            session['err'] = "ui_admin_add_dest_cluster_error"
            logger.error(e)
            return redirect(url_for('destination_cluster_controller.dest_clusters_list'))
Example #17
0
    def _clean_iscsi_config(self, disk_id, path_index, iqn):
        """Disable the LIO tpg backing path *path_index* of *disk_id*.

        Returns True when cleanup completed (or there was nothing to
        clean), False on error.
        """
        logger.debug("Move action ,start clean disk {} path {}.".format(
            disk_id, path_index))

        lio_api = LioAPI()

        try:

            # Get tpgs for iqn.
            tpgs = lio_api.get_iqns_with_enabled_tpgs().get(iqn, None)
            if not iqn or not tpgs:
                logger.info("Move action ,could not find ips for %s " %
                            disk_id)
            # Remove the assigned ips from our interfaces
            else:
                # tpg ids are 1-based strings; only the matching path's tpg
                # is disabled.  Iterating keys replaces the Python-2-only
                # iteritems() -- the ip values were never used.
                for tpg in tpgs:
                    if tpg == str(path_index + 1):
                        lio_api.disable_path(iqn, tpg)
                        logger.info(
                            "Move action,cleaned disk {} path {}.".format(
                                disk_id, path_index))
                        break
        except Exception:
            logger.error("Move action,could not clean disk path for %s" %
                         disk_id)
            return False
        logger.debug("Move action end clean disk {} path {}.".format(
            disk_id, path_index))
        return True
def view_dest_cluster(cluster_name):
    """Render the edit page for one replication destination cluster."""
    if request.method == 'GET' or request.method == 'POST':
        template = 'admin/replication/destination_clusters/edit_dest_cluster.html'
        # BUGFIX: pre-initialize so the except branch can still render when
        # get_replication_dest_cluster() itself raises (the original hit a
        # NameError on the unbound variable there).
        selected_dest_cluster = None
        try:
            manage_dest_cluster = ManageDestinationCluster()
            selected_dest_cluster = manage_dest_cluster.get_replication_dest_cluster(cluster_name)

            # One-shot flash messages: pop and pass under the same keyword.
            for flag, key in ((list_err, 'err'),
                              (list_success, 'success'),
                              (list_warning, 'warning')):
                if flag in session:
                    return render_template(template,
                                           selected_dest_cluster=selected_dest_cluster,
                                           **{key: session.pop(key)})
            return render_template(template,
                                   selected_dest_cluster=selected_dest_cluster)

        except Exception as e:
            result = session['err'] = "ui_admin_view_get_dest_cluster_info_error"
            logger.error(e)
            return render_template(template, err=result,
                                   selected_dest_cluster=selected_dest_cluster)
def get_security_key_():
    """Fetch the Consul encryption key from another management node.

    Tries each management node other than the local one; returns the
    first non-empty key read, or None when no node yields one.
    """
    # get the security code from the server we're connecting to
    ssh_exec = ssh()

    conf = configuration()
    cluster_info = conf.get_cluster_info()
    local_ip = conf.get_node_info().management_ip

    for cluster_node in cluster_info.management_nodes:
        remote_node_info = NodeInfo()
        remote_node_info.load_json(json.dumps(cluster_node))
        if remote_node_info.management_ip == local_ip:
            continue
        command_result, err = ssh_exec.exec_command(
            remote_node_info.management_ip,
            'python ' + ConfigAPI().get_consul_encryption_key_script())

        if err is not None and str(err) != "":
            logger.error("Could not read Consul encryption key from node: " +
                         remote_node_info.management_ip)
            logger.error(err)
            print('command_result: ', command_result)
        else:
            # BUGFIX: guard against empty command output; the original
            # indexed splitlines()[0] and raised IndexError on it.
            lines = command_result.splitlines()
            key = str(lines[0]) if lines else ""
            if key:
                return key
    return None
def edit_user(name):
    """Render the replication-user edit page for user *name*.

    (The original also declared an unused ``auth_pools`` local; removed.)
    """
    active_pools = []
    user_info = ""
    if request.method == 'GET' or request.method == 'POST':
        template = '/admin/replication/cluster_users/edit_user.html'
        try:
            user_info = ManageUsers().get_replication_user(name)
            active_pools = ManagePools().get_active_pools()
            # One-shot flash messages: pop and pass under the same keyword.
            for flag, key in ((list_err, 'err'),
                              (list_success, 'success'),
                              (list_warning, 'warning')):
                if flag in session:
                    return render_template(template, user_info=user_info,
                                           active_pools=active_pools,
                                           **{key: session.pop(key)})
            return render_template(template, active_pools=active_pools,
                                   user_info=user_info)

        except ConsulException as e:
            logger.error(e)

        except Exception as e:
            logger.error(e)

        return render_template(template, active_pools=active_pools,
                               user_info=user_info)
Example #21
0
def get_next_partition_index(dev):
    """
    Get the next free partition index on a given device.

    :return: Index number (> 1 if there is already a partition on the device)
    or 1 if there is no partition table.
    :raises RuntimeError: when parted output is empty or unparseable.
    """
    try:
        output, err = exec_command('parted --machine -- {} print'.format(dev))
        lines = output
    except subprocess.CalledProcessError as e:
        logger.info('cannot read partition index; assume it '
                    'isn\'t present\n (Error: %s)' % e)
        return 1

    # BUGFIX: the original used `raise logger.error(...)`; logger.error()
    # returns None, so those statements raised a meaningless
    # "exceptions must derive from BaseException" TypeError.
    if not lines:
        logger.error('parted failed to output anything')
        raise RuntimeError('parted failed to output anything')
    logger.debug('get_free_partition_index: analyzing ' + lines)
    if ('CHS;' not in lines and
                'CYL;' not in lines and
                'BYT;' not in lines):
        msg = ('parted output expected to contain one of ' +
               'CHH; CYL; or BYT; : ' + lines)
        logger.error(msg)
        raise RuntimeError(msg)
    if os.path.realpath(dev) not in lines:
        msg = 'parted output expected to contain ' + dev + ': ' + lines
        logger.error(msg)
        raise RuntimeError(msg)
    _, partitions = lines.split(os.path.realpath(dev))
    # Raw string for the regex; materialize a real list so the emptiness
    # test works on Python 3 (map() there returns an always-truthy object).
    numbers_as_strings = re.findall(r'^\d+', partitions, re.MULTILINE)
    partition_numbers = [int(s) for s in numbers_as_strings]
    if partition_numbers:
        return max(partition_numbers) + 1
    else:
        return 1
def reset_private_key(user_name):
    """Reset the replication user's private key and return it as JSON.

    Implicitly returns None on an unexpected request method or when a
    ConsulException occurs (the error is only logged).
    """
    try:
        if request.method in ('GET', 'POST'):
            new_key = ManageUsers().reset_prv_key(user_name)
            return json.dumps(new_key)
    except ConsulException as e:
        logger.error(e)
Example #23
0
def main_catch(func, args):
    """Invoke ``func(args)``, logging any exception and exiting with -1."""
    try:
        func(args)

    except Exception as e:
        # str(e) instead of e.message: .message is Python-2-only and
        # missing on most exception types.
        logger.error(str(e))
        sys.exit(-1)
def users_list():
    """Render the replication users list page."""
    if request.method == 'GET' or request.method == 'POST':
        users_list = []
        message = "ui_admin_replication_user_info_message"
        template = '/admin/replication/cluster_users/users_list.html'
        try:
            users_list = ManageUsers().get_replication_users()
            # One-shot flash messages: pop and pass under the same keyword.
            # (Replaces three near-identical if/elif branches.)
            for flag, key in ((list_err, 'err'),
                              (list_success, 'success'),
                              (list_warning, 'warning')):
                if flag in session:
                    return render_template(template, users_list=users_list,
                                           **{key: session.pop(key)})
            return render_template(template, info=message, users_list=users_list)

        except ConsulException as e:
            logger.error(e)

        except Exception as e:
            logger.error(e)

        return render_template(template, info=message, users_list=users_list)
Example #25
0
    def add_ceph_user(self, user_name, pool_list):
        """Create (or fetch) a Ceph client keyring restricted to *pool_list*.

        With an empty pool list the user gets the unrestricted
        'profile rbd' osd cap.

        :raises CephException: CONNECTION_TIMEOUT on connection failure,
            GENERAL_EXCEPTION on any other command failure.
        """
        config = configuration()
        cluster_name = config.get_cluster_name()
        # Build the osd caps, e.g. 'profile rbd pool=a','profile rbd pool=b'.
        # join() replaces the original manual +=/trailing-comma trimming.
        if len(pool_list) > 0:
            pool_string = ",".join(
                "'profile rbd pool=" + pool + "'" for pool in pool_list)
        else:
            pool_string = "'profile rbd'"

        cmd = "ceph auth get-or-create client.{} mgr 'allow r' mon 'profile rbd' osd {} >> /etc/ceph/{}.client.{}.keyring  " \
              "--cluster {} ".format (user_name, pool_string, cluster_name, user_name, cluster_name)
        ret, stdout, stderr = exec_command_ex(cmd)
        if ret != 0:
            if stderr:
                if 'Connection timed out' in stderr or 'error connecting' in stderr:
                    logger.error('Error in Ceph Connection cmd:' + cmd)
                    raise CephException(CephException.CONNECTION_TIMEOUT,
                                        'ConnectionTimeError')

            logger.error('General error in Ceph cmd:' + cmd)
            raise CephException(CephException.GENERAL_EXCEPTION,
                                'GeneralCephException')
def add_rep_usr(name = ""):
    """Render the add-replication-user page."""
    if request.method == 'GET' or request.method == 'POST':
        template = '/admin/replication/cluster_users/add_user.html'
        # BUGFIX: the original left `active_pools` unbound when
        # get_active_pools() raised, so the fall-through render_template
        # at the bottom hit a NameError.
        active_pools = []
        try:
            active_pools = ManagePools().get_active_pools()
            # One-shot flash messages: pop and pass under the same keyword.
            for flag, key in ((list_err, 'err'),
                              (list_success, 'success'),
                              (list_warning, 'warning')):
                if flag in session:
                    return render_template(template, name=name,
                                           active_pools=active_pools,
                                           **{key: session.pop(key)})
            return render_template(template, active_pools=active_pools, name=name)

        except ConsulException as e:
            logger.error(e)

        except Exception as e:
            logger.error(e)

        return render_template(template, active_pools=active_pools, name=name)
def create_vg(devices, vg_name):
    """
    Create a Volume Group. Command looks like :
        vgcreate --force --yes group_name device

    Once created the volume group is returned as a ``VolumeGroup`` object

    :param devices: A list of devices to create a VG. Optionally, a single
                    device (as a string) can be used.
    :param vg_name: The name of the VG
    :return: the VolumeGroup, or None when vgcreate fails with output on
             stderr.
    """
    # BUGFIX: the docstring promises a single device string is accepted,
    # but iterating a string yields one character per pass; wrap it.
    if isinstance(devices, str):
        devices = [devices]

    cmd = "vgcreate --force --yes " + vg_name
    for device in devices:
        cmd = cmd + " " + device

    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            logger.error(
                "lvm_lib : Error creating volume group --> {}".format(stderr))
            return None

    vg = get_vg(vg_name)
    return vg
Example #28
0
    def set_new_assignments(self, paths_assignment_info):
        """Record a new batch of path reassignments in Consul.

        Acquires the assignment root lock under a fresh session, marks
        every assignment pending and writes it.  Raises when another
        assignment is already running or the lock cannot be taken.
        """
        logger.info("Set new assignment.")
        if self.get_current_reassignment() is not None:
            raise Exception("There is already running assignment.")

        config_api = ConfigAPI()
        consul_api = ConsulAPI()
        logger.info("Delete old assignments.")
        consul_api.delete_assignments()
        lock_session = consul_api.get_new_session_ID(
            config_api.get_assignment_session_name(),
            configuration().get_node_name(), True)
        if not consul_api.lock_key(config_api.get_consul_assignment_path(),
                                   lock_session, "root"):
            logger.error("Can't lock paths assignment key.")
            raise Exception("Can't lock paths assignment key.")

        logger.info("Lock assignment root.")
        for info in paths_assignment_info:
            info.status = ReassignPathStatus.pending
            consul_api.set_path_assignment(
                info, self._get_node_session(info.target_node))
            logger.info(
                "New assignment for {} ,disk {}, from node {}  and to node {} with status {}"
                .format(info.ip, info.disk_id, info.node,
                        info.target_node, info.status))
def main_catch(func, args):
    """Invoke ``func(args)``, logging any exception and printing '-1'."""
    try:
        func(args)

    except Exception as e:
        # str(e) instead of e.message: .message is Python-2-only and
        # missing on most exception types.
        logger.error(str(e))
        print('-1')
    def save_session(self, app, session, response):
        """Persist the session's cookie expiry in Consul and (re)set the
        session cookie on *response*.

        An empty session deletes both the session's Consul subtree and
        the cookie.  A sid of '-1' marks a failed Consul connection
        upstream; it is converted into a ConnectionError so the cookie
        gets cleared and the handler backs off briefly.
        """
        try:
            domain = self.get_cookie_domain(app)
            consul_base_api = BaseAPI()
            if not session:
                # Session cleared: drop its Consul keys and the cookie.
                consul_base_api.delete_key("".join(
                    [consul_session_key, self.sid, "/"]))
                response.delete_cookie(app.session_cookie_name, domain=domain)
                return
            else:
                if session.sid == '-1':
                    # Sentinel sid set when Consul was unreachable at session
                    # open; treat saving it as a connection failure.
                    raise ConnectionError()

            cookie_exp = self.get_expiration_time(app, session)
            if session.sid != '-1':
                # Store the cookie expiry alongside the session in Consul.
                consul_base_api.write_value(
                    "".join([consul_session_key, session.sid, "/_exp"]),
                    str(cookie_exp))
            response.set_cookie(app.session_cookie_name,
                                session.sid,
                                httponly=True,
                                domain=domain)
        except ConnectionError:
            # Consul unavailable: clear the cookie and back off one second
            # to avoid hammering the consul agent.
            logger.error("Error on consul connection to save session")
            response.delete_cookie(app.session_cookie_name, domain=domain)
            sleep(1)