Example #1
def get_cluster_host_list_logic(cluster_name, is_host_info=''):
    """
    GET handler logic for cluster node information
    :return: resp, status
              resp: response data in JSON format
              status: response code
    """
    data = []
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    if cluster_name == 'default':
        sfo_collect_hosts = SfoHostInfoMethod.query_last_host_info_list()
        used_hosts = db.session.query(SfoClusterNodes).filter(
            SfoClusterNodes.cluster_name != '').all()
        if used_hosts:
            used_hosts = map(lambda x: x.node_host_name.lower(), used_hosts)
        if sfo_collect_hosts:
            disused_hosts = filter(
                lambda x: x.host_name.lower() not in used_hosts,
                sfo_collect_hosts)
        else:
            disused_hosts = []
        will_used_hosts = []
        if disused_hosts:
            for host in disused_hosts:
                sfo_host = SfoClusterNodesMethod.create_or_update(
                    host.host_name, '', '', '', '')
                if sfo_host:
                    db.session.add(sfo_host)
                    will_used_hosts.append(sfo_host)
            db.session.commit()
        last_host_info_set = will_used_hosts
        if is_host_info:
            for host in last_host_info_set:
                sfo_host = SfoHostInfoMethod.query_host_info_by_host_name(
                    host.node_host_name)
                sfo_host_deepcopy = deepcopy(sfo_host)
                host.host_info = sfo_host_deepcopy
    else:
        last_host_info_set = SfoClusterNodesMethod.query_host_list_by_cluster_name(
            cluster_name)
        if last_host_info_set:
            last_host_info_set = filter(lambda x: x.node_stat == '1',
                                        last_host_info_set)
            if is_host_info:
                for host in last_host_info_set:
                    sfo_host = SfoHostInfoMethod.query_host_info_by_host_name(
                        host.node_host_name)
                    sfo_host_deepcopy = deepcopy(sfo_host)
                    host.host_info = sfo_host_deepcopy
    if last_host_info_set:
        status = 200
        message = 'OK'
        data = last_host_info_set
    else:
        status = 404
        message = 'Not Found Record HostList By %s' % cluster_name
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
Example #2
def cluster_swift_config_logic(hostname, filename=None):
    """
    Logic for reading a Swift configuration file from a cluster node
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    host = SfoClusterNodesMethod.query_host_by_host_name(hostname)
    if not host:
        raise ValueError('No node found for hostname %s' % hostname)
    if filename:
        node_man = manager_class.NodeManager(host.node_inet_ip)
        content = node_man.swift_config.read_config(
            config_path=os.path.dirname(filename),
            config_file=os.path.basename(filename))
        config = ConfigParser()
        config.read_string(content)
        config_dict = OrderedDict()
        try:
            if config.defaults():
                default_config = config.defaults()
                config_dict[config.default_section] = default_config
            for section, option in config.items():
                if config.has_section(section):
                    section_copy = config._sections[section].copy()
                    config_dict[section] = section_copy
        except NoSectionError as error:
            access_logger.error('get exception %s from swift config' %
                                str(error))
        status = 200
        message = 'OK'
        data = {"config": config_dict}
Example #3
def get_storage_net_used_logic(cluster_name, starttime, endtime):
    data = {}
    status = ''
    message = ''
    storage = {}
    resp = {"status": status, "data": data, "message": message}
    proxy_node_list, storage_node_list = SfoClusterNodesMethod.category_node_list(
        cluster_name)  # get the proxy and storage node host lists
    if storage_node_list:
        try:
            send_bytes, recv_bytes, add_time = server_net_used(
                storage_node_list, starttime, endtime)
            if add_time:
                status = 200
                message = 'OK'
                storage.update({
                    'send_bytes': send_bytes,
                    "recv_bytes": recv_bytes,
                    "add_time": add_time
                })
            else:
                status = 404
                message = 'Storage Node Not Found Record'
        except Exception as error:
            status = 501
            message = 'get exception %s from storage net used' % str(error)
Example #4
def cluster_total_proxy_ops(cluster_name):
    """
    The cluster's estimated IOPS is tied to the CPU core count: total CPU cores of the cluster's Proxy nodes * 80 (per-core capability) * 0.8
    Per-core capability: a 48-core CPU is estimated at 4000 ops, from which the per-core value is derived
    Formula: 4000/48 * 0.8 * total CPU cores of the cluster's proxy nodes
    :param cluster_name:
    :return:
    """
    sfo_proxy_nodes = []
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    for node in sfo_nodes:
        try:
            node_role_js = json.loads(node.node_role)
            if isinstance(node_role_js,
                          dict) and node_role_js.get('Proxy-Server') == 'YES':
                sfo_proxy_nodes.append(node)
        except ValueError:
            continue
    _estimate_proxy_ops_total = 0
    for node in sfo_proxy_nodes:
        sfo_node = SfoHostInfoMethod.query_host_info_by_host_name(
            node.node_host_name)
        if sfo_node:
            _estimate_proxy_ops_total += int(sfo_node.cpu_cores) * 80
    estimate_proxy_ops_total = _estimate_proxy_ops_total * 0.8
    return estimate_proxy_ops_total
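
To make the estimate above concrete, here is a small standalone sketch of the same arithmetic for a hypothetical proxy tier (the node count and core counts are made up for illustration):

# Hypothetical worked example of the estimate in cluster_total_proxy_ops:
# three proxy nodes with 48 CPU cores each, ~80 ops per core, scaled by 0.8.
proxy_cpu_cores = [48, 48, 48]  # made-up core counts, one entry per proxy node
raw_total = sum(cores * 80 for cores in proxy_cpu_cores)  # 3 * 48 * 80 = 11520
estimate_proxy_ops_total = raw_total * 0.8                # 11520 * 0.8 = 9216.0
print(estimate_proxy_ops_total)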
Example #5
def rebalance(cluster_name, datajson):
    """
    Rebalance the ring
    :param datajson:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    policy_num = ''
    ring_name = datajson.get('ring_name')
    ring_name = ring_name if ring_name.endswith(
        '.ring.gz') else ring_name + '.ring.gz'
    if 'object-' in ring_name:
        obj_ring, policy_num = ring_name.rstrip('.ring.gz').split('-')
    sfo_clu_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    if not sfo_clu_nodes:
        raise ValueError('Not Master Node in %s' % cluster_name)
    # create the task entry
    rm = RingManager(cluster_name)
    try:
        if not policy_num:
            ring_name = ring_name.split('.')[0]
            content = rm.rebalance(ring_name=ring_name)
        else:
            content = rm.rebalance(policy_num=policy_num, policy=True)
    except Exception as error:
        status = 501
        message = str(error)
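
The ring-name handling above (normalizing the '.ring.gz' suffix and extracting the storage-policy number from names like 'object-1') can be exercised on its own; the following is a minimal standalone sketch of that parsing:

# Standalone illustration of the ring-name parsing used in rebalance().
def parse_ring_name(ring_name):
    policy_num = ''
    ring_name = ring_name if ring_name.endswith('.ring.gz') else ring_name + '.ring.gz'
    if 'object-' in ring_name:
        # rstrip() strips a *set* of characters, not a suffix; it works here only
        # because policy numbers contain none of the characters in '.ring.gz'.
        obj_ring, policy_num = ring_name.rstrip('.ring.gz').split('-')
    return ring_name, policy_num

print(parse_ring_name('object-1'))         # ('object-1.ring.gz', '1')
print(parse_ring_name('account.ring.gz'))  # ('account.ring.gz', '')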
Example #6
def change_cluster_nodesrv_logic(host_name, param):
    status, message = '', ''
    resp = {"status": status, "message": message}
    sfo_clu_node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
    if sfo_clu_node:
        ip = sfo_clu_node.node_inet_ip
        action = param['action']
        service = param['service']
        client = ClusterClient(
            host=ip,
            port=7201,
            message="systemctl %s openstack-swift-%s.service" %
            (action, service.replace('_', '-')))
        asyncore.loop()
        if client.buffer == 'SUCCESS':
            sfo_server = SfoNodeServiceMethod.query_node_srv_by_host_name(
                host_name)
            setattr(sfo_server, 'srv_' + param['service'],
                    action_status_map[action])
            db.session.add(sfo_server)
            db.session.commit()
            status = 200
            message = 'OK'
        else:
            status = 400
            message = 'Operation Fail'
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "message": message})
    return resp, status
Example #7
def standard_system(cluster_name, taskid=None):
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    sfo_nodes = filter(
        lambda x: not (json.loads(x.node_role)['Proxy-Server'] == 'YES' and
                       json.loads(x.node_role)['Container-Server'] == 'NO' and
                       json.loads(x.node_role)['Account-Server'] == 'NO' and
                       json.loads(x.node_role)['Object-Server'] == 'NO'),
        sfo_nodes)
    cm = ClusterManager()
    if len(sfo_nodes) > 0:
        try:
            for sfo_node in sfo_nodes:
                systemd_zip_path = os.path.join(config.sfo_server_temp_file,
                                                'standard')
                content = give_away_file_to_host(os.path.join(
                    systemd_zip_path, 'openstack-swift-systemd.zip'),
                                                 sfo_node.node_host_name,
                                                 '/usr/lib/systemd/system',
                                                 taskid=taskid)
                content = cm.excute_cmd(
                    message=
                    'unzip -o -d /usr/lib/systemd/system/ /usr/lib/systemd/system/openstack-swift-systemd.zip',
                    host=sfo_node.node_inet_ip)
                content = cm.excute_cmd(
                    message=
                    'rm -f /usr/lib/systemd/system/openstack-swift-systemd.zip',
                    host=sfo_node.node_inet_ip)
                content = cm.excute_cmd(message='systemctl daemon-reload',
                                        host=sfo_node.node_inet_ip)
                message = content
        except Exception as error:
            message = str(error)
            assert False
        finally:
Example #8
def add_node_2cluster(nodejson):
    """
    Add a node to the database
    :param nodejson:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    node_role = nodejson.get('node_role')
    host_ip = nodejson.get('node_inet_ip')
    cluster_name = nodejson.get('cluster_name', 'default')
    replication_ip = nodejson.get('node_replicate_ip')
    host_name = nodejson.get('node_host_name')
    auto_install_srv = nodejson.get('auto_install_srv')
    username = '******'
    services = host_name
    operation = 'add_node'
    service_type = 'node_manager'
    try:
        if nodejson:
            sfo_cluster_node = SfoClusterNodesMethod.create_or_update(node_host_name=host_name,
                                                                      node_inet_ip=host_ip,
                                                                      node_role=node_role,
                                                                      node_replicate_ip=replication_ip,
                                                                      cluster_name=cluster_name)
            if sfo_cluster_node:
                db.session.add(sfo_cluster_node)
                sfo_task = SfoTasksListMethod.create_or_update_task(service_type=service_type,
                                                                    operation=operation,
                                                                    hostname=host_name,
                                                                    username=username,
                                                                    service_name=services
                                                                    )
                db.session.add(sfo_task)
                taskid = sfo_task.guid
                if auto_install_srv:
                    node_role = json.loads(node_role)
                    node_services_set = set(
                        map(lambda x: x.lower().split('-')[0], filter(lambda x: node_role[x] == 'YES', node_role)))
                    for srv in node_services_set:
                        scheduler.add_service(args=(sfo_cluster_node, srv, taskid))
                        if srv == 'proxy':
                            scheduler.add_service(args=(sfo_cluster_node, 'memcached', taskid))
                status = 200
                message = 'OK'
            else:
                status = 202
                message = 'OK'
        else:
            status = 501
            message = 'NULL VALUE %s' % nodejson
    except Exception as ex:
        status = 502
        message = str(ex)
    finally:
        db.session.commit()
        resp.update({"status": status, "message": message})
        return resp, status
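
The auto-install branch above derives the set of services to install from the node_role JSON; the following self-contained snippet illustrates that map/filter expression with a made-up role document:

# Illustration of how add_node_2cluster turns a node_role document into service names.
node_role = {
    "Proxy-Server": "YES",
    "Object-Server": "YES",
    "Account-Server": "NO",
    "Container-Server": "NO",
}
node_services_set = set(
    map(lambda x: x.lower().split('-')[0],
        filter(lambda x: node_role[x] == 'YES', node_role)))
print(node_services_set)  # {'proxy', 'object'} (set order may vary)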
Example #9
def add_disk_cluster(cluster_name, datajson):
    """
    Add a disk to the ring
    :param datajson:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    policy_num = ''
    ip = datajson.get('ip')
    port = datajson.get('port')
    zone = datajson.get('zone')
    device = datajson.get('device')
    weight = datajson.get('weight')
    region = datajson.get('region')
    ring_name = datajson.get('ring_name')
    ring_name = ring_name if ring_name.endswith(
        '.ring.gz') else ring_name + '.ring.gz'
    if 'object-' in ring_name:
        obj_ring, policy_num = ring_name.rstrip('.ring.gz').split('-')
    replication_ip = datajson.get('replication_ip')
    replication_port = datajson.get('replication_port')
    sfo_clu_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    if not sfo_clu_nodes:
        raise ValueError('Not Master Node in %s' % cluster_name)
    # create the task entry
    rm = RingManager(cluster_name)
    try:
        if not policy_num:
            ring_name = ring_name.split('.')[0]
            content = rm.add_disk_2_ring(ring_name=ring_name,
                                         region=region,
                                         zone=zone,
                                         ip=ip,
                                         port=port,
                                         disk_device=device,
                                         weight=weight,
                                         replication_ip=replication_ip,
                                         replication_port=replication_port)
        else:
            content = rm.add_disk_2_ring(region=region,
                                         zone=zone,
                                         ip=ip,
                                         port=port,
                                         disk_device=device,
                                         weight=weight,
                                         replication_ip=replication_ip,
                                         replication_port=replication_port,
                                         policy=True,
                                         policy_num=policy_num)

    except Exception as error:
        status = 501
        message = str(error)
Example #10
def get_requests_count_logic(cluster_name, start_time, end_time):
    """
    GET handler logic for the cluster information listing
    :param start_time: str  date string
    :param end_time: str  date string
    :return: resp, status
              resp: response data in JSON format
              status: response code
    """
    data = {"request_stats": [], "add_times": []}
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    if is_less_than_nhours(start_time, end_time, 1):
        request_stats = SfoProxyStatsDMethod.query_proxt_stat_st2et(
            start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 8):
        request_stats = SfoProxyStatsD5MinMethod.query_proxt_stat_st2et(
            start_time, end_time)
    elif is_less_than_nhours(start_time, end_time, 24):
        request_stats = SfoProxyStatsDHourMethod.query_proxt_stat_st2et(
            start_time, end_time)
    else:
        request_stats = SfoProxyStatsDDayMethod.query_proxt_stat_st2et(
            start_time, end_time)
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name=cluster_name)
    for stat in request_stats:
        add_time = stat.add_time
        hostname_httpmethod_map = proxy_server_stat(stat.req_timing)
        for host_name in list(hostname_httpmethod_map.keys()):
            if host_name.upper() not in [
                    node.node_host_name.upper() for node in sfo_nodes
            ]:
                hostname_httpmethod_map.pop(host_name)
        if hostname_httpmethod_map.values():
            data['request_stats'].append(
                reduce(lambda x, y: sum_times_group_by_httpmethod(x, y),
                       hostname_httpmethod_map.values()))
            data['add_times'].append(add_time)
    if request_stats:
        status = 200
        message = 'OK'
        if data['request_stats']:
            data['request_stats'] = dict(
                zip(data['request_stats'][0].keys(),
                    zip(*map(lambda x: x.values(), data['request_stats']))))
        else:
            status = 404
            message = 'Not Found Record By %s' % cluster_name
    else:
        status = 404
        message = 'Not Found Record By %s' % cluster_name
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
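
The dict/zip expression near the end transposes the list of per-timestamp counters into one series per HTTP method; here is a minimal standalone sketch with made-up numbers (it assumes every per-timestamp dict is built with the same key order, which holds when they all come from the same code path):

# Standalone illustration of the transpose used for data['request_stats'].
request_stats = [
    {'GET': 10, 'PUT': 2, 'DELETE': 0},  # counters at t0 (made-up values)
    {'GET': 12, 'PUT': 3, 'DELETE': 1},  # counters at t1
    {'GET': 11, 'PUT': 5, 'DELETE': 0},  # counters at t2
]
series = dict(
    zip(request_stats[0].keys(),
        zip(*map(lambda x: x.values(), request_stats))))
print(series)  # {'GET': (10, 12, 11), 'PUT': (2, 3, 5), 'DELETE': (0, 1, 0)}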
Example #11
def get_node_list(cluster_name):
    """
    Get the list of nodes in the cluster by cluster name
    :param cluster_name:
    :return:
    """
    data = []
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    try:
        if cluster_name:
            nodes = SfoClusterNodes.query.filter(SfoClusterNodes.cluster_name == cluster_name).all()
            if nodes:
                nodes = filter(lambda x: x.node_stat == '1', nodes)
                if nodes:
                    data = nodes
                    status = 200
                    message = 'OK'
                else:
                    status = 404
                    message = 'No available node was found by %s cluster' % cluster_name
            else:
                status = 404
                message = 'No record was found by %s cluster' % cluster_name
        else:
            sfo_collect_hosts = SfoHostInfoMethod.query_last_host_info_list()
            used_hosts = db.session.query(SfoClusterNodes).filter(SfoClusterNodes.cluster_name != '').all()
            if used_hosts:
                used_hosts = map(lambda x: x.node_host_name.lower(), used_hosts)
            if sfo_collect_hosts:
                disused_hosts = filter(lambda x: x.host_name.lower() not in used_hosts, sfo_collect_hosts)
            else:
                disused_hosts = []
            will_used_hosts = []
            if disused_hosts:
                for host in disused_hosts:
                    sfo_host = SfoClusterNodesMethod.create_or_update(host.host_name, '', '', '', '')
                    if sfo_host:
                        db.session.add(sfo_host)
                        will_used_hosts.append(sfo_host)
                db.session.commit()
                data = will_used_hosts
                status = 200
                message = 'OK'
            else:
                status = 404
                message = 'There are not enough nodes'
    except Exception as ex:
        status = 502
        message = str(ex)
    finally:
        resp.update({"status": status, "data": data, "message": message})
        return resp, status
Example #12
def mount_all_node_disks():
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    if sfo_nodes:
        sfo_nodes = filter(
            lambda x: json.loads(x.node_role)['Account-Server'] == 'YES' or
            json.loads(x.node_role)['Container-Server'] == 'YES' or
            json.loads(x.node_role)['Object-Server'] == 'YES', sfo_nodes)
        for node in sfo_nodes:
            add_disk(node.node_inet_ip, node.node_replicate_ip,
                     node.node_host_name, '', taskid)
Example #13
def test_file_link(host_name):
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
    if node:
        try:
            fm = FileManager()
            content = fm.give_away_file_to_host(host_name, '/app/sfo/README.md', '/tmp')
            status = 200
            message = content
        except Exception as error:
            message = str(error)
            raise ValueError(message)
Example #14
def install_relation_service():
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    if sfo_nodes:
        for idy, sfo_node in enumerate(sfo_nodes):
            if sfo_node.cluster_name == cluster_name:
                node_role = json.loads(sfo_node.node_role)
                node_services = map(
                    lambda x: x.split('-')[0].lower(),
                    filter(lambda x: node_role[x] == 'YES', node_role))
                for idx, srv in enumerate(node_services):
                    add_service(sfo_node, srv, taskid)
                    if srv == 'proxy':
                        add_service(sfo_node, 'memcached', taskid)
Example #15
def test_cmd_link(host_name):
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
    if node:
        try:
            sop = ServiceOperation(node.node_inet_ip)
            content = sop.excute_cmd('date')
            status = 200
            message = content
        except Exception as error:
            message = str(error)
            raise ValueError(message)
Example #16
def get_disk_performance_logic(cluster_name):
    data = ''
    status = ''
    message = ''
    disk_num_total = 0
    resp = {
        "status": status,
        "data": data,
        "message": message,
        "disk_num_total": disk_num_total
    }
    nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name=cluster_name)
    order_list = []
    for node in nodes:
        sfo_disk_per = []
        _sfo_disk_per = SfoDiskPerformMethod.query_last_diskinfo_by_hostname(
            node.node_host_name)
        for disk in _sfo_disk_per:
            if disk.disk_name not in map(lambda x: x.disk_name, sfo_disk_per):
                mx_disk = max(filter(lambda x: x.disk_name == disk.disk_name,
                                     _sfo_disk_per),
                              key=lambda x: x.add_time)
                sfo_disk_per.append(mx_disk)
                order_list.append(mx_disk)
    if not nodes:
        raise ValueError('Cluster Not Found nodes')
    order_list = sorted(order_list,
                        key=lambda x: float(x.disk_percent),
                        reverse=True)
    if order_list:
        status = 200
        message = 'OK'
        data = order_list
        disk_num_total = len(order_list)
    else:
        status = 404
        message = 'Not Found Record Cluster Disk Information By %s' % cluster_name
    resp.update({
        "status": status,
        "data": data,
        "message": message,
        'disk_num_total': disk_num_total
    })
    return resp, status
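
The inner loop above keeps only the newest sample per disk name before sorting by usage; this standalone sketch reproduces the idiom with a made-up record type:

# Illustration of the "latest sample per disk, then sort by usage" idiom
# from get_disk_performance_logic, using a made-up record type.
from collections import namedtuple

DiskSample = namedtuple('DiskSample', 'disk_name add_time disk_percent')

samples = [
    DiskSample('sdb', '2023-01-01 10:00:00', '41.0'),
    DiskSample('sdb', '2023-01-01 10:05:00', '42.5'),
    DiskSample('sdc', '2023-01-01 10:05:00', '88.1'),
]

latest = []
for disk in samples:
    if disk.disk_name not in map(lambda x: x.disk_name, latest):
        mx_disk = max(filter(lambda x: x.disk_name == disk.disk_name, samples),
                      key=lambda x: x.add_time)
        latest.append(mx_disk)

order_list = sorted(latest, key=lambda x: float(x.disk_percent), reverse=True)
print([(d.disk_name, d.disk_percent) for d in order_list])  # [('sdc', '88.1'), ('sdb', '42.5')]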
Example #17
def get_alarm_historys_logic(starttime, endtime, page, limit):
    """
    GET handler logic for historical alarm records
    :return: resp, status
              resp: response data in JSON format
              status: response code
    """
    data = {'alarm_total': 0, "alarms": []}
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    alarm_set = SfoAlarmLogMethod.group_by_alarm_device(page=int(page),
                                                        limit=int(limit),
                                                        starttime=starttime,
                                                        endtime=endtime)
    if alarm_set:
        data['alarm_total'] = alarm_set.total
        for alarm in alarm_set.items:
            sfo_alarm_logs = SfoAlarmLogMethod.query_by_alarm_device(
                alarm.alarm_device, starttime, endtime)
            if len(sfo_alarm_logs) > 0:
                critical_len = filter(lambda x: x.alarm_level == 'critical',
                                      sfo_alarm_logs)
                warn_len = filter(lambda x: x.alarm_level == 'warning',
                                  sfo_alarm_logs)
                sfo_cluster_node = SfoClusterNodesMethod.query_host_by_host_name(
                    alarm.hostname)
                alarm_info = {
                    "alarm": sfo_alarm_logs[0],
                    "total": len(sfo_alarm_logs),
                    "warning_total": len(warn_len),
                    "critical_total": len(critical_len)
                }
                if sfo_cluster_node and sfo_cluster_node.cluster_name:
                    alarm_info.update(
                        {"cluster_name": sfo_cluster_node.cluster_name})
                    alarm_info.update({"ip": sfo_cluster_node.node_inet_ip})
                data['alarms'].append(alarm_info)
        status = 200
        message = 'OK'
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
Example #18
def create_cluster_nodesrv_detail_logic(host_name, param):
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    host = SfoClusterNodesMethod.query_host_by_host_name(host_name)
    if not host:
        raise ValueError('No Record by %s' % host_name)
    operation = param.get('operation')
    services = param.get('service')
    try:
        nodeman = NodeManager(host.node_inet_ip)
        if hasattr(nodeman.swift_service, operation):
            oper_fun = getattr(nodeman.swift_service, operation)
            content = oper_fun(services)
        else:
            raise AttributeError('%s not found' % operation)
    except Exception as error:
        status = 501
        message = str(error)
Example #19
def umount_disk_2node(host_name, disk_name):
    """
    Unmount a disk from a node
    :param host_name: host name
    :param disk_name: disk name
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    sfo_node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
    if not sfo_node:
        raise ValueError('Not Found Node Host %s' % host_name)
    disk_op = DiskOperation(sfo_node.node_inet_ip, sfo_node.node_replicate_ip)
    try:
        disk_name = disk_name if disk_name else 'all'
        content = disk_op.delete_disk(disk_name)
    except Exception as error:
        status = 501
        message = str(error)
Example #20
def get_disk_list(cluster_name):
    """
    Get the disk list of the cluster's nodes
    :param cluster_name:
    :return:
    """
    sfo_disks = []
    apply_result_list = []
    status = ''
    message = {}
    resp = {"status": status, "data": sfo_disks, "message": message}
    sfo_node = SfoClusterNodesMethod.query_host_list_by_cluster_name(cluster_name=cluster_name)
    rm = RingManager(cluster_name)
    try:
        ring_host_label_map = rm.used_map(cluster_name)
    except IOError:
        ring_host_label_map = {}
    pool = Pool(25)
    for node in sfo_node:
        apply_result = pool.apply_async(func=async_disk_operation, args=(node,))
        apply_result_list.append(apply_result)
    pool.close()
    pool.join()
    for apply_result in apply_result_list:
        apply_result_data = apply_result.get(timeout=1)
        if apply_result_data:
            for ring_name, ring_info in ring_host_label_map.items():
                for disk_infomation in apply_result_data:
                    for host_labels_dict in ring_info:
                        if (disk_infomation.host_ip == host_labels_dict['host_ip'] or disk_infomation.host_re_ip == host_labels_dict['host_ip']) and disk_infomation.label in host_labels_dict['labels']:
                            disk_infomation.is_used.append(ring_name)
            sfo_disks.extend(apply_result_data)
    if sfo_disks:
        status = 200
        message = 'OK'
        sfo_disks = sorted(sfo_disks, key=lambda x: x.host_name)
    else:
        status = 404
        message = 'Not Found Record'
    resp.update({"status": status, "data": sfo_disks, "message": message})
    return resp, status
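
The per-node fan-out above relies on a worker pool and apply_async; the pool import is not shown in the snippet, so the following is a minimal sketch of the same collect-results pattern assuming multiprocessing.Pool:

# Minimal sketch of the apply_async fan-out/collect pattern used in get_disk_list,
# assuming multiprocessing.Pool (the original import is not shown).
from multiprocessing import Pool

def probe_node(node_name):
    # stand-in for async_disk_operation(node); returns a made-up per-node result
    return {'host': node_name, 'disks': ['sdb', 'sdc']}

if __name__ == '__main__':
    nodes = ['node-01', 'node-02', 'node-03']
    pool = Pool(4)
    apply_result_list = [pool.apply_async(func=probe_node, args=(n,)) for n in nodes]
    pool.close()
    pool.join()
    results = [r.get(timeout=1) for r in apply_result_list]
    print(results)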
Example #21
def add_srv_2cluster(cluster_name, srvjson):
    """
    Add a service to the database
    :param cluster_name:
    :param srvjson:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    host_name = srvjson.get('host_name')
    service_name = srvjson.get('service_name')
    sfo_clu_node = SfoClusterNodesMethod.query_host_by_host_name(host_name)
    if not sfo_clu_node:
        raise ValueError('Not Found Node Host %s' % host_name)
    swift_op = SwiftServiceOperation(sfo_clu_node.node_inet_ip)
    try:
        content = swift_op.install_service(service_name)
    except Exception as error:
        status = 501
        message = str(error)
Example #22
def add_ring_2cluster(cluster_name, ringjson):
    """
    Add ring data to the database
    :param ringjson:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    policy_num = ''
    replicas = ringjson.get('replicas')
    part_power = ringjson.get('part_power')
    ring_name = ringjson.get('ring_name')
    ring_name = ring_name if ring_name.endswith(
        '.ring.gz') else ring_name + '.ring.gz'
    if 'object-' in ring_name:
        obj_ring, policy_num = ring_name.rstrip('.ring.gz').split('-')
    min_part_hours = ringjson.get('min_part_hours')
    sfo_clu_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    if not sfo_clu_nodes:
        raise ValueError('Not Master Node in %s' % cluster_name)
    rm = RingManager(cluster_name)
    try:
        if policy_num:
            content = rm.create(part_power=part_power,
                                replicas=replicas,
                                min_part_hours=min_part_hours,
                                policy=True,
                                policy_num=policy_num)

        else:
            ring_name = ring_name.split('.')[0]
            content = rm.create(ring_name=ring_name,
                                part_power=part_power,
                                replicas=replicas,
                                min_part_hours=min_part_hours)
    except Exception as error:
        status = 501
        message = str(error)
Example #23
def reload_proxy_srv(cluster_name, referer, syscode, taskid=None):
    nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(cluster_name)
    nodes = filter(lambda x: json.loads(x.node_role)['Proxy-Server'] == 'YES',
                   nodes)
    content_list = []
    message_js = {
        "source": referer,
        "syscode": syscode,
    }
    for proxy in nodes:
        try:
            result = requests.get('http://%s:8080/reload/' %
                                  proxy.node_inet_ip,
                                  timeout=5)
            message = 'Reload Proxy IP:%s  result:%s' % (proxy.node_inet_ip,
                                                         result.status_code)
            access_logger.info(message)
        except IOError as error:
            message = 'Reload Proxy IP:%s  result:%s' % (proxy.node_inet_ip,
                                                         error)
            access_logger.error(message)
        finally:
Example #24
def update_cluster_config(cluster_name, cluster_json):
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    sfo_clu = SfoClusterMethod.query_cluster_by_cluster_name(cluster_name)
    if sfo_clu:
        sfo_nodes = SfoClusterNodesMethod.query_not_used_hosts()
        if len(sfo_nodes) > 0:
            username = '******'
            operation = 'create'
            service_type = 'cluster manager'
            services = 'create %s cluster' % cluster_name
            aco_ring_json = cluster_json.get('account', {})
            ser_ip = cluster_json.get('statsd_host_ip', '')
            con_ring_json = cluster_json.get('container', {})
            obj_ring_json = cluster_json.get('object', {})
            proxy_json = cluster_json.get('proxy', {})
            sfo_task = SfoTasksListMethod.create_or_update_task(
                service_type=service_type,
                operation=operation,
                username=username,
                service_name=services)
            db.session.add(sfo_task)
            db.session.commit()
            scheduler.create_cluster(args=(cluster_name, sfo_task.guid, ser_ip,
                                           aco_ring_json, con_ring_json,
                                           obj_ring_json, proxy_json))
            status = 201
            message = 'Create OK'
            data = {'data': sfo_task.guid}
            resp.update(data)
        else:
            status = 200
            message = 'There are not enough nodes'
    else:
        status = 404
        message = "%s cluster doesn't exists" % cluster_name
    resp.update({"status": status, "message": message})
    return resp, status
Example #25
def give_away_ring(cluster_name, datajson):
    """
    Distribute the ring file to the cluster's nodes
    :param datajson:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    ring_name = datajson.get('ring_name')
    ring_name = ring_name if ring_name.endswith(
        '.ring.gz') else ring_name + '.ring.gz'
    sfo_clu_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    if not sfo_clu_nodes:
        raise ValueError('Not Master Node in %s' % cluster_name)
    rm = RingManager(cluster_name)
    try:
        content = rm.give_away_ring(cluster_name=cluster_name,
                                    ring_file=ring_name)
    except Exception as error:
        message = str(error)
        status = 501
Example #26
def get_cluster_agent_logic(cluster_name, page, limit):
    """
    GET handler logic for cluster agent management
    :return: resp, status
              resp: response data in JSON format
              status: response code
    """
    data = {"agents_total": '', "agents": []}
    status = ''
    message = ''
    resp = {"status": status, "data": data, "message": message}
    sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(cluster_name)
    agents = BeatHeartInfoMethod.lived_agent_filter_cluster2(sfo_nodes, int(page), int(limit))
    if agents:
        status = 200
        message = 'OK'
        data['agents_total'] = agents.total
        data['agents'].extend(agents.items)
    else:
        status = 404
        message = 'Not Found Record Cluster Agent'
    resp.update({"status": status, "data": data, "message": message})
    return resp, status
Example #27
def update_swift_config_logic(hostname, filename, **sections):
    """
    Logic for updating a configuration file on a node
    :param hostname:
    :param filename:
    :param sections:
    :return:
    """
    status = ''
    message = ''
    resp = {"status": status, "message": message}
    host = SfoClusterNodesMethod.query_host_by_host_name(hostname)
    if not host:
        raise ValueError('No node found for hostname %s' % hostname)
    node_man = manager_class.NodeManager(host.node_inet_ip)
    content = node_man.swift_config.read_config(
        config_path=os.path.dirname(filename),
        config_file=os.path.basename(filename))
    config = ConfigParser()
    config.read_string(content)
    for section in sections:
        if section.upper() == 'DEFAULT':
            for key, value in sections[section].items():
                if key in config.defaults():
                    config.defaults()[key] = value
                else:
                    config.defaults().setdefault(key, value)
        else:
            if config.has_section(section):
                for key, value in sections[section].items():
                    config.set(section=section, option=key, value=value)
            else:
                config.add_section(section)
                for key, value in sections[section].items():
                    config.set(section=section, option=key, value=value)
    if not os.path.exists(
            '%s/%s/' %
        (global_config.sfo_server_temp_file, host.node_inet_ip)):
        os.makedirs('%s/%s/' %
                    (global_config.sfo_server_temp_file, host.node_inet_ip))
    else:
        if os.path.exists(
                '%s/%s/%s' %
            (global_config.sfo_server_temp_file, host.node_inet_ip, filename)):
            shutil.copy(
                '%s/%s/%s' % (global_config.sfo_server_temp_file,
                              host.node_inet_ip, filename),
                '%s/%s/%s.bak' % (global_config.sfo_server_temp_file,
                                  host.node_inet_ip, filename))
    with open(
            '%s/%s/%s' %
        (global_config.sfo_server_temp_file, host.node_inet_ip, filename),
            mode='w+') as fp:
        config.write(fp=fp)
    content = node_man.swift_config.write_config(os.path.basename(filename))
    copy_content = node_man.swift_config.copy(
        old_path='/tmp/sfo',
        config_path=os.path.dirname(filename),
        config_file=os.path.basename(filename))
    if content == 'Send File Success' and copy_content == 'Excute Cmd Success':
        status = 200
        message = 'OK'
    else:
        status = 500
        message = 'Update Config Fail %s %s' % (content, copy_content)
    resp.update({"status": status, "message": message})
    return resp, status
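
The section/DEFAULT handling above is standard ConfigParser behavior; here is a self-contained sketch of the same update logic against an in-memory config (it assumes Python 3's configparser, which matches the read_string call used above):

# Self-contained sketch of the DEFAULT-vs-section update logic in
# update_swift_config_logic, using Python 3's configparser.
import io
from configparser import ConfigParser

content = """[DEFAULT]
swift_dir = /etc/swift

[pipeline:main]
pipeline = healthcheck proxy-server
"""

sections = {
    'DEFAULT': {'workers': '8'},                              # ends up in config.defaults()
    'pipeline:main': {'pipeline': 'proxy-server'},            # existing section: overwrite
    'filter:healthcheck': {'use': 'egg:swift#healthcheck'},   # new section: add it first
}

config = ConfigParser()
config.read_string(content)
for section, options in sections.items():
    if section.upper() == 'DEFAULT':
        for key, value in options.items():
            config.defaults()[key] = value
    else:
        if not config.has_section(section):
            config.add_section(section)
        for key, value in options.items():
            config.set(section=section, option=key, value=value)

buf = io.StringIO()
config.write(buf)
print(buf.getvalue())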
Example #28
    def standard_ga_template():
        standard_system(cluster_name, taskid)
        sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
            cluster_name)
        if sfo_nodes:
            sfo_proxy_nodes = filter(
                lambda x: json.loads(x.node_role)['Proxy-Server'] == 'YES',
                sfo_nodes)
            sfo_storage_nodes = filter(
                lambda x: json.loads(x.node_role)['Account-Server'] == 'YES' or
                json.loads(x.node_role)['Container-Server'] == 'YES' or json.
                loads(x.node_role)['Object-Server'] == 'YES', sfo_nodes)
            for node in sfo_proxy_nodes:
                ser_op = ServiceOperation(node.node_inet_ip)
                ser_op.excute_cmd(
                    "sed -i 's/\"64\"/\"1024\"/g' /etc/sysconfig/memcached")
                ser_op.excute_cmd(
                    'sed -i \'$c OPTIONS="-l 0.0.0.0 -U 11211 -t 12 >> /var/log/memcached.log 2>&1"\' /etc/sysconfig/memcached'
                )

            for node in sfo_storage_nodes:
                systemd_zip_path = os.path.join(config.sfo_server_temp_file,
                                                'standard')
                give_away_file_to_host(os.path.join(systemd_zip_path,
                                                    'SwiftDiskMount.sh'),
                                       node.node_host_name,
                                       '/usr/bin',
                                       taskid=taskid)
                ser_op = ServiceOperation(node.node_inet_ip)
                ser_op.excute_cmd('chmod +x /usr/bin/SwiftDiskMount.sh')
                ser_op.excute_cmd(
                    'systemctl enable openstack-swift-disk-mount.service')

            sfo_memcached_proxy = map(lambda x: x.node_inet_ip + ':11211',
                                      sfo_proxy_nodes)
            for idy, sfo_node in enumerate(sfo_nodes):
                base_dir = config.sfo_server_temp_file
                abs_dir = os.path.join(base_dir, sfo_node.node_inet_ip)
                if not os.path.exists(abs_dir):
                    os.mkdir(abs_dir)
                filenames = create_new_config_file(
                    abs_dir, sfo_node, **{
                        "private_ip":
                        sfo_node.node_replicate_ip,
                        "public_ip":
                        sfo_node.node_inet_ip,
                        "ser_ip":
                        ser_ip,
                        "account_server_+hostname":
                        "account_server_%s" % sfo_node.node_host_name,
                        "object_server_+hostname":
                        "object_server_%s" % sfo_node.node_host_name,
                        "container_server_+hostname":
                        "container_server_%s" % sfo_node.node_host_name,
                        "proxy_server_+hostname":
                        "proxy_server_%s" % sfo_node.node_host_name,
                        "memcachehost1:11211,memcachehost2:11211,memcachehost3:11211":
                        ','.join(sfo_memcached_proxy)
                    })

                for idx, filename in enumerate(filenames):
                    target_file = os.path.join(abs_dir, filename)
                    if filename.startswith('account'):
                        cp_file_path = '/etc/swift/account-server'
                    elif filename.startswith('container'):
                        cp_file_path = '/etc/swift/container-server'
                    elif filename.startswith('object'):
                        cp_file_path = '/etc/swift/object-server'
                    elif filename.startswith('rsync'):
                        cp_file_path = '/etc'
                    elif filename == 'openstack-swift.conf':
                        cp_file_path = '/etc/rsyslog.d'
                    else:
                        cp_file_path = '/etc/swift'
                    give_away_file_to_host(target_file,
                                           sfo_node.node_host_name,
                                           cp_file_path, taskid)
Example #29
def create_cluster(
    cluster_name,
    taskid,
    ser_ip,
    account_ring_json,
    con_ring_json,
    obj_ring_json,
    proxy_json,
):
    from sfo_utils.socket_utils import LocalProcessSocketClient
    sfo_nodes = SfoClusterNodesMethod.query_not_used_hosts()
    auth_switch = SfoCofigureMethod.query_value_from_con_key('AUTH_SWITCH')
    admin_user = SfoCofigureMethod.query_value_from_con_key(
        'KEYSTONE_USERNAME')
    admin_password = SfoCofigureMethod.query_value_from_con_key(
        'KEYSTONE_PASSWORD')
    for sfo_node in sfo_nodes:
        enable_cluster = False
        node_role = {
            "Proxy-Server": "NO",
            "Container-Server": "NO",
            "Account-Server": "NO",
            "Object-Server": "NO"
        }
        for proxy_node in proxy_json['nodes']:
            if sfo_node.node_host_name == proxy_node['host_name']:
                node_role.update({"Proxy-Server": "YES"})
                sfo_node.node_inet_ip = proxy_node['ip']
                enable_cluster = True
        for account_node in account_ring_json['nodes']:
            if sfo_node.node_host_name == account_node['host_name']:
                node_role.update({"Account-Server": "YES"})
                sfo_node.node_inet_ip = account_node['ip']
                sfo_node.node_replicate_ip = account_node["replication_ip"]
                enable_cluster = True

        for con_node in con_ring_json['nodes']:
            if sfo_node.node_host_name == con_node['host_name']:
                node_role.update({"Container-Server": "YES"})
                sfo_node.node_inet_ip = con_node['ip']
                sfo_node.node_replicate_ip = con_node["replication_ip"]
                enable_cluster = True

        for obj_node in obj_ring_json['nodes']:
            if sfo_node.node_host_name == obj_node['host_name']:
                node_role.update({"Object-Server": "YES"})
                sfo_node.node_inet_ip = obj_node['ip']
                sfo_node.node_replicate_ip = obj_node["replication_ip"]
                enable_cluster = True

        if enable_cluster:
            sfo_node.node_role = json.dumps(node_role, encoding='utf-8')
            sfo_node.cluster_name = cluster_name
    db.session.commit()

    def install_relation_service():
        sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
            cluster_name)
        if sfo_nodes:
            for idy, sfo_node in enumerate(sfo_nodes):
                if sfo_node.cluster_name == cluster_name:
                    node_role = json.loads(sfo_node.node_role)
                    node_services = map(
                        lambda x: x.split('-')[0].lower(),
                        filter(lambda x: node_role[x] == 'YES', node_role))
                    for idx, srv in enumerate(node_services):
                        add_service(sfo_node, srv, taskid)
                        if srv == 'proxy':
                            add_service(sfo_node, 'memcached', taskid)

    def standard_ga_template():
        standard_system(cluster_name, taskid)
        sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
            cluster_name)
        if sfo_nodes:
            sfo_proxy_nodes = filter(
                lambda x: json.loads(x.node_role)['Proxy-Server'] == 'YES',
                sfo_nodes)
            sfo_storage_nodes = filter(
                lambda x: json.loads(x.node_role)['Account-Server'] == 'YES' or
                json.loads(x.node_role)['Container-Server'] == 'YES' or json.
                loads(x.node_role)['Object-Server'] == 'YES', sfo_nodes)
            for node in sfo_proxy_nodes:
                ser_op = ServiceOperation(node.node_inet_ip)
                ser_op.excute_cmd(
                    "sed -i 's/\"64\"/\"1024\"/g' /etc/sysconfig/memcached")
                ser_op.excute_cmd(
                    'sed -i \'$c OPTIONS="-l 0.0.0.0 -U 11211 -t 12 >> /var/log/memcached.log 2>&1"\' /etc/sysconfig/memcached'
                )

            for node in sfo_storage_nodes:
                systemd_zip_path = os.path.join(config.sfo_server_temp_file,
                                                'standard')
                give_away_file_to_host(os.path.join(systemd_zip_path,
                                                    'SwiftDiskMount.sh'),
                                       node.node_host_name,
                                       '/usr/bin',
                                       taskid=taskid)
                ser_op = ServiceOperation(node.node_inet_ip)
                ser_op.excute_cmd('chmod +x /usr/bin/SwiftDiskMount.sh')
                ser_op.excute_cmd(
                    'systemctl enable openstack-swift-disk-mount.service')

            sfo_memcached_proxy = map(lambda x: x.node_inet_ip + ':11211',
                                      sfo_proxy_nodes)
            for idy, sfo_node in enumerate(sfo_nodes):
                base_dir = config.sfo_server_temp_file
                abs_dir = os.path.join(base_dir, sfo_node.node_inet_ip)
                if not os.path.exists(abs_dir):
                    os.mkdir(abs_dir)
                filenames = create_new_config_file(
                    abs_dir, sfo_node, **{
                        "private_ip":
                        sfo_node.node_replicate_ip,
                        "public_ip":
                        sfo_node.node_inet_ip,
                        "ser_ip":
                        ser_ip,
                        "account_server_+hostname":
                        "account_server_%s" % sfo_node.node_host_name,
                        "object_server_+hostname":
                        "object_server_%s" % sfo_node.node_host_name,
                        "container_server_+hostname":
                        "container_server_%s" % sfo_node.node_host_name,
                        "proxy_server_+hostname":
                        "proxy_server_%s" % sfo_node.node_host_name,
                        "memcachehost1:11211,memcachehost2:11211,memcachehost3:11211":
                        ','.join(sfo_memcached_proxy)
                    })

                for idx, filename in enumerate(filenames):
                    target_file = os.path.join(abs_dir, filename)
                    if filename.startswith('account'):
                        cp_file_path = '/etc/swift/account-server'
                    elif filename.startswith('container'):
                        cp_file_path = '/etc/swift/container-server'
                    elif filename.startswith('object'):
                        cp_file_path = '/etc/swift/object-server'
                    elif filename.startswith('rsync'):
                        cp_file_path = '/etc'
                    elif filename == 'openstack-swift.conf':
                        cp_file_path = '/etc/rsyslog.d'
                    else:
                        cp_file_path = '/etc/swift'
                    give_away_file_to_host(target_file,
                                           sfo_node.node_host_name,
                                           cp_file_path, taskid)

    def mount_all_node_disks():
        sfo_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
            cluster_name)
        if sfo_nodes:
            sfo_nodes = filter(
                lambda x: json.loads(x.node_role)['Account-Server'] == 'YES' or
                json.loads(x.node_role)['Container-Server'] == 'YES' or json.
                loads(x.node_role)['Object-Server'] == 'YES', sfo_nodes)
            for node in sfo_nodes:
                add_disk(node.node_inet_ip, node.node_replicate_ip,
                         node.node_host_name, '', taskid)

    def create_rings():
        rings = ['account', 'container', 'object']
        for idy, i in enumerate(rings):
            if i == 'account':
                create_ring(cluster_name, 'account',
                            account_ring_json['part_power'],
                            account_ring_json['replicas'],
                            account_ring_json['min_part_hours'], '', taskid)
            elif i == 'container':
                create_ring(cluster_name, 'container',
                            con_ring_json['part_power'],
                            con_ring_json['replicas'],
                            con_ring_json['min_part_hours'], '', taskid)
            else:
                create_ring(cluster_name, 'object',
                            obj_ring_json['part_power'],
                            obj_ring_json['replicas'],
                            obj_ring_json['min_part_hours'], '', taskid)

    def add_all_disk_to_rings():
        for host_dict in account_ring_json['nodes']:
            ip = host_dict['ip']
            port = host_dict['port']
            zone = host_dict['zone']
            region = host_dict['region']
            host_name = host_dict['host_name']
            replication_ip = host_dict['replication_ip']
            replication_port = host_dict['replication_port']
            do = DiskOperation(ip, replication_ip)
            _, mount_disks = do.mounted_disks()
            for idx, disk in enumerate(mount_disks):
                _dist_mt_info = disk.strip()
                disk_mt_info_list = _dist_mt_info.split(' ')
                if len(disk_mt_info_list) >= 5:
                    disk_name = disk_mt_info_list[0]
                    label = disk_mt_info_list[4].replace('[',
                                                         '').replace(']', '')
                    sfo_disk_per = SfoHostInfoMethod.query_host_info_by_host_name(
                        host_name)
                    disk_total = json.loads(
                        sfo_disk_per.disk_useful_size)[disk_name]
                    disk_total_bytes = reverse_unit(disk_total)
                    weight = weight_con(disk_total_bytes)
                    add_disk_2_ring(taskid=taskid,
                                    ring_name='account',
                                    region=region,
                                    zone=zone,
                                    ip=ip,
                                    port=port,
                                    disk_device=label,
                                    weight=weight,
                                    replication_ip=replication_ip,
                                    replication_port=replication_port,
                                    cluster_name=cluster_name)

        for host_dict in con_ring_json['nodes']:
            ip = host_dict['ip']
            port = host_dict['port']
            zone = host_dict['zone']
            region = host_dict['region']
            host_name = host_dict['host_name']
            replication_ip = host_dict['replication_ip']
            replication_port = host_dict['replication_port']
            do = DiskOperation(ip, replication_ip)
            _, mount_disks = do.mounted_disks()
            for idx, disk in enumerate(mount_disks):
                _dist_mt_info = disk.strip()
                disk_mt_info_list = _dist_mt_info.split(' ')
                if len(disk_mt_info_list) >= 5:
                    disk_name = disk_mt_info_list[0]
                    label = disk_mt_info_list[4].replace('[',
                                                         '').replace(']', '')
                    sfo_disk_per = SfoHostInfoMethod.query_host_info_by_host_name(
                        host_name)
                    disk_total = json.loads(
                        sfo_disk_per.disk_useful_size)[disk_name]
                    disk_total_bytes = reverse_unit(disk_total)
                    weight = weight_con(disk_total_bytes)
                    add_disk_2_ring(taskid=taskid,
                                    ring_name='container',
                                    region=region,
                                    zone=zone,
                                    ip=ip,
                                    port=port,
                                    disk_device=label,
                                    weight=weight,
                                    replication_ip=replication_ip,
                                    replication_port=replication_port,
                                    cluster_name=cluster_name)

        for host_dict in obj_ring_json['nodes']:
            ip = host_dict['ip']
            port = host_dict['port']
            zone = host_dict['zone']
            region = host_dict['region']
            host_name = host_dict['host_name']
            replication_ip = host_dict['replication_ip']
            replication_port = host_dict['replication_port']
            do = DiskOperation(ip, replication_ip)
            _, mount_disks = do.mounted_disks()
            for idx, disk in enumerate(mount_disks):
                _dist_mt_info = disk.strip()
                disk_mt_info_list = _dist_mt_info.split(' ')
                if len(disk_mt_info_list) >= 5:
                    disk_name = disk_mt_info_list[0]
                    label = disk_mt_info_list[4].replace('[',
                                                         '').replace(']', '')
                    sfo_disk_per = SfoHostInfoMethod.query_host_info_by_host_name(
                        host_name)
                    disk_total = json.loads(
                        sfo_disk_per.disk_useful_size)[disk_name]
                    disk_total_bytes = reverse_unit(disk_total)
                    weight = weight_con(disk_total_bytes)
                    add_disk_2_ring(taskid=taskid,
                                    ring_name='object',
                                    region=region,
                                    zone=zone,
                                    ip=ip,
                                    port=port,
                                    disk_device=label,
                                    weight=weight,
                                    replication_ip=replication_ip,
                                    replication_port=replication_port,
                                    cluster_name=cluster_name)

    def rebalance_ring():
        # after the rings have been created, add the disks
        # add disks to the rings
        rebalance('account', '', cluster_name, taskid)
        rebalance('container', '', cluster_name, taskid)
        rebalance('object', '', cluster_name, taskid)

    def ga_ring():
        give_away_ring('account.ring.gz', cluster_name, taskid)
        give_away_ring('container.ring.gz', cluster_name, taskid)
        give_away_ring('object.ring.gz', cluster_name, taskid)

    install_function_list = [
        install_relation_service, standard_ga_template, mount_all_node_disks,
        create_rings, add_all_disk_to_rings, rebalance_ring, ga_ring
    ]

    local_soc_client = LocalProcessSocketClient(host='127.0.0.1', port=54444)
    for step, func in enumerate(install_function_list):
        try:
            step += 1
            func()
        except AssertionError:
            local_soc_client.send(
                json.dumps({
                    "taskid":
                    taskid,
                    "float_percent":
                    round(float(step) / float(len(install_function_list)), 2),
                    "status":
                    500
                }))

            break
        else:
            local_soc_client.send(
                json.dumps({
                    "taskid":
                    taskid,
                    "float_percent":
                    round(float(step) / float(len(install_function_list)), 2),
                    "status":
                    200
                }))
Beispiel #30
0
def get_cluster_detail_logic(cluster_name, start_time, end_time):
    """
    GET 请求集群信息列表处理逻辑
    :param start_time: str  字符串日期格式
    :param end_time: str  字符串日期格式
    :return: resp, status
              resp: json格式的响应数据
              status: 响应码
    """

    status = ''
    message = ''
    abnormal_dic = {"proxy": [], 'account': [], 'container': [], 'object': []}
    abnormal_node = {
        "node_online": 0,
        "node_outline": 0,
        "node_total": 0,
        "outline_nodes": []
    }
    cluster_capacity_info = {"apply_system_total": 0, "apply_systems_info": []}
    estimate_proxy_ops_total = cluster_total_proxy_ops(cluster_name)
    data = {
        "cluster_physical": "",
        "cluster_virtual": "",
        "cluster_ops_24h_ago": "",
        "band_width_24h_ago": "",
        "abnormal": abnormal_dic,
        "cluster_capacity_info": cluster_capacity_info,
        "cluster_proxy_total_ops": estimate_proxy_ops_total,
        "cluster_node": abnormal_node
    }
    resp = {"status": status, "data": data, "message": message}
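    # Gather the latest overview, the time-ranged history, the cluster's nodes,
    # their heartbeat records and the account-manager systems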
    instances = SfoClusterInfoMethod.query_last_cluster_overview(cluster_name)
    yesterday_cluster_set = SfoClusterInfoMethod.query_start2end_region_list_info(
        cluster_name, start_time, end_time)
    cluster_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
        cluster_name)
    sfo_cluster_beatheart = BeatHeartInfoMethod.lived_agent_filter_cluster2(
        cluster_nodes)
    sfo_aco_man = SfoAccountManagerMethod.query_systems(cluster_name)
    if instances:
        status = 200
        message = 'OK'
        proxy_json = json.loads(instances.proxy_num)
        storage_json = json.loads(instances.storage_num)
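        # proxy_num / storage_num are JSON snapshots of online vs. expected service counts per role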
        current_time = time.time()
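        # A node counts as offline if it has no heartbeat record at all
        # or its last heartbeat is older than 180 seconds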
        if len(cluster_nodes) >= len(sfo_cluster_beatheart):
            for node_beatheart in sfo_cluster_beatheart:
                if current_time - strft_2_timestamp(
                        node_beatheart.add_time) > 180:
                    abnormal_node['outline_nodes'].append(
                        node_beatheart.hostname)
            abnormal_node['node_total'] = len(cluster_nodes)
            abnormal_node['node_outline'] = len(
                abnormal_node['outline_nodes']) + (
                    len(cluster_nodes) - len(sfo_cluster_beatheart))
            abnormal_node['node_online'] = len(
                sfo_cluster_beatheart) - len(
                    abnormal_node['outline_nodes'])

            for node in cluster_nodes:
                if node.node_host_name not in map(lambda x: x.hostname,
                                                  sfo_cluster_beatheart):
                    abnormal_node['outline_nodes'].append(node.node_host_name)

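        # If any role reports fewer online services than expected,
        # collect the recently-reported, non-running services per role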
        if proxy_json and storage_json:
            if int(proxy_json['proxy_online']) < int(proxy_json['proxy_total']) or \
                    int(storage_json['account_online']) < int(storage_json['account_num']) or \
                    int(storage_json['container_online']) < int(storage_json['container_num']) or \
                    int(storage_json['object_online']) < int(storage_json['object_num']):
                sfo_cluster_nodes = SfoClusterNodesMethod.query_host_list_by_cluster_name(
                    cluster_name)
                sfo_node_srv = SfoNodeServiceMethod.query_node_srv_last_info()
                sfo_cluster_node_srv = filter(
                    lambda x: x.host_name in
                    [node.node_host_name for node in sfo_cluster_nodes],
                    sfo_node_srv)
                if int(proxy_json['proxy_online']) < int(
                        proxy_json['proxy_total']):
                    sfo_proxy_nodes = []
                    for node in sfo_cluster_nodes:
                        try:
                            node_role_js = json.loads(node.node_role)
                            if isinstance(node_role_js,
                                          dict) and node_role_js.get(
                                              'Proxy-Server') == 'YES':
                                sfo_proxy_nodes.append(node)
                        except ValueError:
                            continue
                    sfo_proxy_node_srv = filter(
                        lambda x: x.host_name in
                        [node.node_host_name for node in sfo_proxy_nodes],
                        sfo_cluster_node_srv)
                    for node_srv in sfo_proxy_node_srv:
                        if current_time - strft_2_timestamp(
                                node_srv.add_time) < 60 and (
                                    node_srv.srv_proxy != 'running'):
                            abnormal_dic['proxy'].append(node_srv)
                if int(storage_json['account_online']) < int(
                        storage_json['account_num']):
                    sfo_acc_nodes = []
                    for node in sfo_cluster_nodes:
                        try:
                            node_role_js = json.loads(node.node_role)
                            if isinstance(node_role_js,
                                          dict) and node_role_js.get(
                                              'Account-Server') == 'YES':
                                sfo_acc_nodes.append(node)
                        except ValueError:
                            continue
                    sfo_acc_node_srv = filter(
                        lambda x: x.host_name in
                        [node.node_host_name for node in sfo_acc_nodes],
                        sfo_cluster_node_srv)
                    for node_srv in sfo_acc_node_srv:
                        if current_time - strft_2_timestamp(
                                node_srv.add_time) < 60 and (
                                    node_srv.srv_account != 'running'):
                            abnormal_dic['account'].append(node_srv)
                if int(storage_json['container_online']) < int(
                        storage_json['container_num']):
                    sfo_con_nodes = []
                    for node in sfo_cluster_nodes:
                        try:
                            node_role_js = json.loads(node.node_role)
                            if isinstance(node_role_js,
                                          dict) and node_role_js.get(
                                              'Container-Server') == 'YES':
                                sfo_con_nodes.append(node)
                        except ValueError:
                            continue
                    sfo_con_node_srv = filter(
                        lambda x: x.host_name in
                        [node.node_host_name for node in sfo_con_nodes],
                        sfo_cluster_node_srv)
                    for node_srv in sfo_con_node_srv:
                        if current_time - strft_2_timestamp(
                                node_srv.add_time) < 60 and (
                                    node_srv.srv_container != 'running'):
                            abnormal_dic['container'].append(node_srv)
                if int(storage_json['object_online']) < int(
                        storage_json['object_num']):
                    sfo_obj_nodes = []
                    for node in sfo_cluster_nodes:
                        try:
                            node_role_js = json.loads(node.node_role)
                            if isinstance(node_role_js,
                                          dict) and node_role_js.get(
                                              'Object-Server') == 'YES':
                                sfo_obj_nodes.append(node)
                        except ValueError:
                            continue
                    sfo_obj_node_srv = filter(
                        lambda x: x.host_name in
                        [node.node_host_name for node in sfo_obj_nodes],
                        sfo_cluster_node_srv)
                    for node_srv in sfo_obj_node_srv:
                        if current_time - strft_2_timestamp(
                                node_srv.add_time) < 60 and (
                                    node_srv.srv_object != 'running'):
                            abnormal_dic['object'].append(node_srv)
        if yesterday_cluster_set:
            instances.list = yesterday_cluster_set
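        # Capacity info: count and list of systems returned by the account-manager query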
        if sfo_aco_man:
            cluster_capacity_info['apply_system_total'] = len(sfo_aco_man)
            cluster_capacity_info['apply_systems_info'] = sfo_aco_man
        data.update({
            "cluster_physical": instances,
            "cluster_virtual": instances,
            "cluster_ops_24h_ago": instances,
            "band_width_24h_ago": instances
        })
    else:
        status = 404
        message = 'Not Found Record By %s' % cluster_name
    resp.update({"status": status, "data": data, "message": message})
    return resp, status