Code example #1
            cclist.append(node)

            # Update local /etc/hosts
            if not cc.options.roleonly and not cc.options.dns:
                update_local_etc_hosts(cc)

            # Release cluster lock and start attaching
            bigdata.release("cluster")
            node.attach(cc, out)
    elif cc.type == "mongodb":
        # Decide role
        role = "unknown"
        if options.role is not None:
            role = options.role
        else:
            (roles, rolemap) = mongodb.generate_role_list(cc, len(cc.everything) + 1)
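            # rolemap["configs"] gives the number of config servers the grown
            # cluster should have: fill config servers first, then add shards.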
            if len(cc.configs) < rolemap["configs"]:
                role = "config"
            else:
                role = "shard"

        # Choose the node list of the config context based on the role
        cclist = None
        if role == "config":
            cclist = cc.configs
        elif role == "shard":
            cclist = cc.shards
        else:
            raise MgmtException("Invalid MongoDB role: %s" % role)

        # Generic attach
Code example #2
    def run(self):
        cc = self.__cc
        try:
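            # Launch a disk-space analysis task for every node whose role is of
            # interest, then wait for all tasks to finish.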
            tasks = []
            for node in cc.everything:
                if node.role in self.__node_roles:
                    task = DiskSpaceAnalyzerTask(node)
                    tasks.append(task)
                    self.__mgr.add_task(task)
            self.__mgr.wait_for_completion()

            # Calculate per-role usage results
            usage_per_role = {}
            total_used = 0
            total_capacity = 0
            for role in self.__node_roles:
                group_total_used = 0
                group_total_capacity = 0
                for task in tasks:
                    if task.node.role == role:
                        if task.capacity is not None and task.capacity > 0 and task.used > 0:
                            total_used += task.used
                            total_capacity += task.capacity
                            group_total_used += task.used
                            group_total_capacity += task.capacity
                if group_total_capacity > 0:
                    usage_per_role[role] = 100.0 * group_total_used / group_total_capacity
                else:
                    usage_per_role[role] = None
            total_usage = 100.0 * total_used / total_capacity if total_capacity > 0 else 0.0

            # Print results to stdout
            out.info("Disk usage per role:")
            for role in usage_per_role:
                usage = usage_per_role[role]
                if usage is not None:
                    out.info("  %ss: %.1f%%" % (role, usage))
                else:
                    out.info("  %ss:  ---" % (role))
            out.info("  Total: %.1f%%" % total_usage)

            # Print results to XML
            out.extra_xml += '  <current-disk-usage total="%.1f">\n' % total_usage
            for role in usage_per_role:
                usage = usage_per_role[role]
                if usage is not None:
                    out.extra_xml += '    <disk-usage role="%s" total="%.1f"/>\n' % (role, usage)
                else:
                    out.extra_xml += '    <disk-usage role="%s" total=""/>\n' % (role)
            out.extra_xml += "  </current-disk-usage>\n"

            # Calculate an estimate for a changed cluster size
            if options.estimate:
                current_cluster_size = len(cc.everything)
                new_cluster_size = int(options.estimate)

                # Get role list
                roles = []
                if cc.type == "hbase" or cc.type == "hadoop":
                    (roles, counts) = hbase.generate_role_list(cc, current_cluster_size)
                elif cc.type == "mongodb":
                    (roles, counts) = mongodb.generate_role_list(cc, current_cluster_size)
                else:
                    raise MgmtException("Unknown cluster type: %s" % cc.type)

                if new_cluster_size <= current_cluster_size:
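                    # Only down-scaling is estimated; growing the cluster is not analyzed here.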
                    # Count the current number of nodes per role
                    role_counts = {}
                    for role in self.__node_roles:
                        role_counts[role] = 0
                    for node in cc.everything:
                        if node.role in role_counts:
                            role_counts[node.role] += 1
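                    # Drop roles from the tail of the generated list until the target
                    # cluster size is reached, decrementing the per-role counts.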
                    while len(roles) > new_cluster_size:
                        r = roles.pop()
                        role_counts[r] -= 1

                    # Recount after down-scaling
                    scaling_ok = True
                    scaling_usage_per_role = {}
                    for role in self.__node_roles:
                        role_total_used = 0
                        role_total_capacity = 0
                        for task in tasks:
                            if task.node.role == role:
                                if task.capacity is not None and task.capacity > 0 and task.used > 0:
                                    role_total_used += task.used
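                                    # Used bytes from every node must still fit, but only the
                                    # capacity of nodes that remain after scaling is counted.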
                                    if role_counts[role] > 0:
                                        role_total_capacity += task.capacity
                                        role_counts[role] -= 1
                        if role_total_capacity > 0:
                            scaling_usage_per_role[role] = 100.0 * role_total_used / role_total_capacity
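                            # Flag the down-scaling as risky when projected usage exceeds 80%.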
                            if role_total_used > 0.8 * role_total_capacity:
                                scaling_ok = False
                        else:
                            scaling_usage_per_role[role] = None
                            scaling_ok = False

                    # Print results to stdout
                    out.info("\nDisk usage per role after scaling:")
                    for role in scaling_usage_per_role:
                        scaling_usage = scaling_usage_per_role[role]
                        if scaling_usage is not None:
                            out.info("  %ss: %.1f%%" % (role, scaling_usage))
                        else:
                            out.info("  %ss:  ---" % (role))
                    if not scaling_ok:
                        out.warn("WARNING: down-scaling can result in loss of data!")

                    # Print XML
                    if scaling_ok:
                        out.extra_xml += '  <scaling-disk-usage result="ok">\n'
                    else:
                        out.extra_xml += '  <scaling-disk-usage result="danger">\n'
                    for role in scaling_usage_per_role:
                        scaling_usage = scaling_usage_per_role[role]
                        if scaling_usage is not None:
                            out.extra_xml += '    <disk-usage role="%s" usage="%.1f"/>\n' % (role, scaling_usage)
                        else:
                            out.extra_xml += '    <disk-usage role="%s" usage=""/>\n' % (role)
                    out.extra_xml += "  </scaling-disk-usage>\n"

        finally:
            self.__mgr.stop_workers()
Code example #3
node = None
try:
    # Check that the big data storage is initialized
    if not bigdata.is_initialized():
        out.error("Big data storage not initialized.")
        sys.exit(1)

    # List roles
    cc = bigdata.create_config_context(options)
    roles = []
    if cc.type == "hbase":
        (roles, counts) = hbase.generate_role_list(cc, node_count)        
    elif cc.type == "hadoop":
        (roles, counts) = hbase.generate_role_list(cc, node_count)        
    elif cc.type == "mongodb":
        (roles, counts) = mongodb.generate_role_list(cc, node_count)

    # Generate XML
    xml =  "  <roles>\n"
    out.info('The node roles will be assigned according to the following list:')
    for role in roles:  
        xml += "    <role>%s</role>\n" % role
        if not options.xml:
            out.info("  %s" % role)
    xml += "  </roles>\n"
    out.extra_xml += xml
    out.role = ''
        

except MgmtException as e:
    out.error(str(e))