예제 #1
0
def test():
    """Test database connectivity by running a trivial query.

    Writes a small SQL script via common.config, runs it through the DB
    client and compares the trimmed output against the expected value.

    Returns:
        normal_code on success, error_code otherwise.
    """
    return_value = normal_code
    result_value = "0"
    if softname == "dameng":
        # disql settings suppress headers/line wrap/timing so stdout is
        # exactly the selected value.
        sql_text = """\
        set HEA off;
        set LINESHOW off;
        set TIMING off;
        select 0;
        exit;
        """
        config_dict = {
            "test_config": {
                "config_file": sql_file,
                "config_context": sql_text,
                "mode": "w"
            }
        }
        result, msg = common.config(config_dict)
        if result:
            test_command = f"su -l {system_user} -c 'disql -S -L {dba_user}/{dba_password} \`{sql_file}'"
        else:
            log.logger.error(msg)
            return error_code
    else:
        # Fix: an unsupported softname previously fell through and raised
        # NameError on test_command below; fail with a clean error code.
        log.logger.error(f"unsupported softname: {softname}")
        return error_code
    log.logger.debug(f"test db comand: {test_command}")
    result, msg = common.exec_command(test_command)
    if result and msg.strip() == result_value:
        return normal_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #2
0
def install():
    """Unpack the JDK package and write its /etc/profile.d script.

    Returns:
        normal_code on success, error_code when the profile config
        cannot be written; exits the process if unpacking fails.
    """
    located = conf_dict.get("located")
    ok, msg = common.install(conf_dict["pkg_file"], jdk_src, jdk_dst,
                             jdk_pkg_dir, located)
    if not ok:
        log.logger.error(msg)
        sys.exit(error_code)

    # Environment setup picked up by all login shells.
    home_dir = f"{located}/{jdk_dst}"
    profile_text = f"""\
            export JAVA_HOME={home_dir}
            export PATH=$JAVA_HOME/bin:$PATH
    """
    config_dict = {
        "jdk_sh": {
            "config_file": "/etc/profile.d/jdk.sh",
            "config_context": profile_text,
            "mode": "w"
        }
    }
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
    written, msg = common.config(config_dict)
    if written:
        return normal_code
    log.logger.error(msg)
    return error_code
예제 #3
0
파일: erlang.py 프로젝트: xhsky/autodep
def install():
    """Unpack the Erlang package and write its /etc/profile.d script.

    Returns:
        normal_code on success, error_code on any failure.
    """
    located = conf_dict.get("located")
    erl_dir = f"{located}/{erl_dst}"
    ok, msg = common.install(conf_dict["pkg_file"], erl_src, erl_dir,
                             erl_pkg_dir, located)
    if not ok:
        log.logger.error(msg)
        return error_code

    # Environment setup picked up by all login shells.
    erl_sh_context = f"""\
            export ERL_HOME={erl_dir}
            export PATH=$ERL_HOME/bin:$PATH
    """
    config_dict = {
        "erl_sh": {
            "config_file": "/etc/profile.d/erl.sh",
            "config_context": erl_sh_context,
            "mode": "w"
        }
    }
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
    written, msg = common.config(config_dict)
    if not written:
        log.logger.error(msg)
        return error_code
    return normal_code
예제 #4
0
파일: set_hosts.py 프로젝트: xhsky/autodep
def main():
    """Set the hostname, populate /etc/hosts and create the install dir.

    Reads softname/action/conf JSON from sys.argv; exits the process with
    normal_code on full success, error_code if any step failed (later
    steps still run after a non-fatal failure).
    """
    log = Logger({"remote": log_remote_level}, logger_name="set_hosts")
    softname, action, conf_json = sys.argv[1:]
    conf_dict = json.loads(conf_json)
    hosts_info_dict = conf_dict["hosts_info"]
    located = conf_dict["located"]
    return_value = normal_code

    try:
        hostname = hosts_info_dict["hostname"]
        hosts_list = hosts_info_dict["hosts"]

        # Set the machine hostname
        log.logger.info(f"设置主机名为{hostname}")
        hostname_cmd = f"hostnamectl set-hostname {hostname}"
        log.logger.debug(f"{hostname_cmd=}")
        result, msg = exec_command(hostname_cmd)
        if not result:
            log.logger.error(f"设置主机名失败: {msg}")
            return_value = error_code

        # Write host entries into /etc/hosts (mode "r+" per common.config)
        hosts_file = "/etc/hosts"

        config_dict = {
            "hosts": {
                "config_file": hosts_file,
                "config_context": "\n".join(hosts_list),
                "mode": "r+"
            }
        }
        result, msg = config(config_dict)
        if result:
            log.logger.info("hosts配置完成")
        else:
            log.logger.error(msg)
            return_value = error_code

        # Create the install directory and keep the well-known symlink
        # (located_dir_link) pointing at it.
        log.logger.info(f"建立安装目录: {located}")
        os.makedirs(located, exist_ok=True)  # was exist_ok=1; bool is the documented type
        if located != located_dir_link:
            if os.path.exists(located_dir_link):
                if os.path.islink(located_dir_link):
                    # Re-point the link only when it targets elsewhere.
                    if os.readlink(located_dir_link) != located:
                        os.remove(located_dir_link)
                        os.symlink(located, located_dir_link)
                else:
                    log.logger.error(f"{located_dir_link}目录存在, 无法安装, 请移除该目录!")
                    return_value = error_code
            else:
                os.symlink(located, located_dir_link)
    except Exception as e:
        log.logger.error(str(e))
        return_value = error_code
    sys.exit(return_value)
예제 #5
0
def backup():
    """Back up the program: fetch the latest config from nacos, write it
    to the local config file, then tar up the code directory.

    Returns:
        normal_code on success, error_code on any failure.
    """
    # Fetch the newest config and persist it to a file
    log.logger.info("备份配置文件...")
    config_data={
            "tenant": namespace_name, 
            "dataId": data_id, 
            "group": group_name
            }
    get_configs_url=f"{nacos_addr_url}{configs_path}"
    try:
        result=requests.get(get_configs_url, params=config_data)
        if result.status_code==200:
            log.logger.debug(f"配置获取成功: {data_id}")
            config_dict={
                    "config_sh": {
                        "config_file": config_file, 
                        "config_context": result.text, 
                        "mode": "w"
                        }
                    }
            log.logger.debug(f"写入配置文件: {config_file}")
            # NOTE(review): rebinding `result` here shadows the HTTP response above.
            result, msg=common.config(config_dict)
            if not result:
                log.logger.error(msg)
                return error_code
        else:
            log.logger.error(f"配置获取失败: {result.status_code}: {result.text}")
            return error_code
    except Exception as e:
        log.logger.error(f"无法连接nacos: {str(e)}")
        return error_code

    log.logger.info("备份代码...")
    backup_version=conf_dict["backup_version"]
    backup_file_list=[]
    # Collect non-.log/.bak file names from the program dir.
    # NOTE(review): backup_file_list is built but never used -- tar_backup below
    # receives an empty list instead; confirm which behavior was intended.
    for backup_file in os.listdir(program_dir):
        if backup_file.endswith(".log") or backup_file.endswith(".bak"):
            pass
        else:
            backup_file_list.append(os.path.basename(backup_file))
    result, msg=common.tar_backup(backup_version, backup_dir, softname, program_dir, [])
    if result:
        return normal_code
    else:
        log.logger.error(msg)
        return error_code
예제 #6
0
def run():
    """Create the business accounts on the database.

    Builds a SQL script that creates each business user and grants DBA,
    writes it to sql_file and executes it through the DB client.

    Returns:
        normal_code on success, error_code otherwise.
    """
    business_user = localization_info_dict["business_user"]
    business_password = localization_info_dict["business_password"]
    return_value = normal_code
    log.logger.debug(f"创建业务账号...")
    if softname == "dameng":
        #select distinct object_name from all_objects where object_type = 'sch' and owner='user1';
        create_sql_template = "create user %s identified by %s;\ngrant dba to %s;"
        create_command = f"su -l {system_user} -c 'disql -S -L {dba_user}/{dba_password} \`{sql_file}'"
        exit_sql = "exit;"
        create_sql_list = ["set TIMING off;"]
    else:
        # Fix: an unsupported softname previously fell through to the loop
        # below and raised NameError on create_sql_template.
        log.logger.error(f"unsupported softname: {softname}")
        return error_code

    # One create/grant statement per user, then the client exit command
    # (replaces the original for/else, which appended exit_sql the same way).
    for user, password in zip(business_user, business_password):
        create_sql_list.append(create_sql_template % (user, password, user))
    create_sql_list.append(exit_sql)

    config_dict = {
        "create_config": {
            "config_file": sql_file,
            "config_context": "\n".join(create_sql_list),
            "mode": "w"
        }
    }
    result, msg = common.config(config_dict)
    if not result:
        log.logger.error(msg)
        return error_code

    log.logger.debug(f"create user command: {create_command}")
    result, msg = common.exec_command(create_command, timeout=600)
    if result:
        # The client can exit 0 while printing SQL errors; scan the output.
        if "错误" in msg or "error" in msg.lower():
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #7
0
def main():
    """Entry point: dispatch the requested action for the JDK package.

    argv: softname, action, conf JSON. Exits 0 on success, 1 on failure.
    """
    softname, action, conf_json = sys.argv[1:]
    conf_dict = json.loads(conf_json)
    log = common.Logger({"remote": log_remote_level})

    flag = 0
    # Install: unpack the package, then write the profile script.
    if action == "install":
        located = conf_dict.get("located")
        pkg_file = conf_dict["pkg_file"]
        value, msg = common.install(pkg_file, jdk_src, jdk_dst, jdk_pkg_dir,
                                    located)
        if not value:
            flag = 1
            log.logger.error(msg)
            sys.exit(flag)

        # Environment setup picked up by all login shells.
        jdk_dir = f"{located}/{jdk_dst}"
        jdk_sh_context = f"""\
                export JAVA_HOME={jdk_dir}
                export PATH=$JAVA_HOME/bin:$PATH
        """
        config_dict = {
            "jdk_sh": {
                "config_file": "/etc/profile.d/jdk.sh",
                "config_context": jdk_sh_context,
                "mode": "w"
            }
        }
        log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
        result, msg = common.config(config_dict)
        if not result:
            log.logger.error(msg)
            flag = 1
        sys.exit(flag)

    elif action in ("run", "start", "stop"):
        # A JDK has no service process to manage; these actions are no-ops.
        sys.exit(flag)
예제 #8
0
def run():
    """Initialize the MySQL data directory, configure plugins, start the
    service and run the post-start init.

    Returns:
        normal_code on success, error_code otherwise.
    """
    return_value = normal_code
    #init_command=f"{mysql_dir}/bin/mysqld --initialize --user={mysql_user} --datadir={mysql_dir}/{my_data}"
    init_command = f"{mysql_dir}/bin/mysqld --initialize --user={mysql_user}"
    log.logger.debug(f"初始化中: {init_command=}")
    result, msg = common.exec_command(init_command, timeout=600)
    if result:
        init_password = None
        try:
            log.logger.debug("获取随机密码")
            # mysqld --initialize writes the generated root password into the log.
            with open(f"{mysql_dir}/{my_logs}/mysqld.log", "r") as f:
                for i in f.readlines():
                    if "temporary password" in i:
                        pass_line = i.split(" ")
                        init_password = pass_line[-1].strip()
                        log.logger.debug(f"{init_password=}")
                        break
                else:
                    log.logger.error(f"初始化失败, 请查看MySQL日志")
        except Exception as e:
            log.logger.error(str(e))
            return error_code
        if init_password is None:
            # Fix: previously execution continued with init_password unbound
            # and crashed later inside init(); fail cleanly instead.
            return error_code

        mysql_plugin_context = """\
                [mysqld]
                # plugin
                plugin-load-add=connection_control.so
                connection_control_failed_connections_threshold=10
                connection_control_min_connection_delay=1000
                """
        config_dict = {
            "my_plugin_cnf": {
                "config_file": my_plugin_cnf_file,
                "config_context": mysql_plugin_context,
                "mode": "w"
            }
        }
        log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
        result, msg = common.config(config_dict)
        if result:
            start_command = f"systemctl start mysqld"
            log.logger.debug(f"{start_command=}")
            result, msg = common.exec_command(start_command, timeout=600)
            if result:
                log.logger.debug(f"检测端口: {port_list=}")
                if common.port_exist(port_list):
                    return_value = init(db_info_dict, mysql_dir, init_password,
                                        cluster_info_dict, role, log)
                else:
                    return_value = error_code
            else:
                log.logger.error(msg)
                return_value = error_code
        else:
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #9
0
def generate_sh(jar_file):
    """Generate the service control script (start/stop/publish).

    Args:
        jar_file: jar file name, resolved relative to program_dir.

    Returns:
        True when the script was written, False otherwise.
    """
    nacos_addr=f"{nacos_host}:{nacos_port}"
    jvm_mem=program_info_dict["jvm_mem"]
    #jar_file=f"{program_dir}/{pkg_file.split('/')[-1]}"
    jar_file=f"{program_dir}/{jar_file}"
    log_file=f"{program_dir}/{service_name}.log"
    # Bash control script; {{...}} below render as literal shell braces.
    program_sh_text=f"""\
            #!/bin/bash
            # sky

            action=$1
            jar_file={jar_file}
            jar_name=`echo $jar_file | rev | cut -d "/" -f 1 | rev`
            nacos_addr={nacos_addr}
            nacos_namespace={namespace_name}
            nacos_group={group_name}
            nacos_config_file_extension={config_file_type}
            nacos_application_name={service_name}         # 须同jar_name配套
            nacos_profiles_active={config_active}         # 须同jar_name配套

            if [ -z "$1" ]; then
              echo "Usage: $0 start|stop|publish"
              exit {error_code}
            elif [ "$action" == "start" ]; then
              jvm_mem={jvm_mem}
              accept_count=1000
              threads=500
              max_connections=8192


              log_file={log_file}

              nohup java -jar -Xms${{jvm_mem}} -Xmx${{jvm_mem}} ${{jar_file}} \\
                --spring.cloud.nacos.server-addr=$nacos_addr \\
                --spring.cloud.nacos.config.namespace=$nacos_namespace \\
                --spring.cloud.nacos.config.group=$nacos_group \\
                --spring.cloud.nacos.config.file-extension=$nacos_config_file_extension \\
                --spring.cloud.nacos.config.enabled=True \\
                --spring.cloud.nacos.discovery.enabled=True \\
                --spring.cloud.nacos.discovery.namespace=$nacos_namespace \\
                --spring.cloud.nacos.discovery.group=$nacos_group \\
                --spring.application.name=$nacos_application_name \\
                --spring.profiles.active=$nacos_profiles_active \\
                --server.tomcat.accept-count=$accept_count \\
                --server.tomcat.min-spare-threads=$threads \\
                --server.tomcat.max-threads=$threads \\
                --server.tomcat.max-connections=$max_connections \\
                &> $log_file &
              echo "$jar_name启动中, 详细请查看日志文件($log_file)."
              exit {normal_code}
            elif [ "$action" == "stop" ]; then
              N=0
              while : ;do
                N=$((N+1))
                Pid=`ps ax | grep java | grep "$jar_name" |  grep -v grep | awk '{{print $1}}'`
                if [ -z "$Pid" ]; then
                  if [ $N == 1 ]; then
                    echo "${{jar_name}}未运行. "
                    exit {stopped_code}
                  else
                    echo "${{jar_name}}已关闭."
                    exit {normal_code}
                  fi
                else
                  if [ $N == 1 ]; then
                    echo "Pid: $Pid"
                    echo "${{jar_name}}关闭中..."
                    kill $Pid
                  fi

                  if [ $N == 30 ]; then
                    kill -9 $Pid
                  fi
                fi
                sleep 1
              done
            elif [ "$action" == "publish" ]; then
              content=`cat {program_dir}/app.${{nacos_config_file_extension}}`
              curl -X POST "http://${{nacos_addr}}{configs_path}" -d tenant=${{nacos_namespace}} -d dataId=${{nacos_application_name}}-${{nacos_profiles_active}}.${{nacos_config_file_extension}} -d group=${{nacos_group}} --data-urlencode content="${{content}}" -d type=${{nacos_config_file_extension}}
            else
              echo "Usage: $0 start|stop|publish"
            fi
    """
    config_dict={
            "program_sh": {
                "config_file": program_sh_file, 
                "config_context": program_sh_text, 
                "mode": "w"
                }
            }
    log.logger.debug(f"写入配置文件: {program_sh_file}")
    result, msg=common.config(config_dict)
    if not result:
        log.logger.error(msg)
        return False
    return True
예제 #10
0
def install():
    """Install and configure RocketMQ (nameserver + broker).

    Unpacks the package, creates the store directories, patches JVM
    memory in the launch scripts, redirects logback output paths and
    writes the nameserver/broker/profile/start-script config files.

    Returns:
        normal_code on success, error_code on failure (some per-step
        failures only downgrade the return value and continue).
    """
    return_value = normal_code
    pkg_file = conf_dict["pkg_file"]
    value, msg = common.install(pkg_file, rocketmq_src, rocketmq_dst,
                                rocketmq_pkg_dir, located)
    if not value:
        log.logger.error(msg)
        return error_code

    cluster_name = rocketmq_info_dict.get("cluster_name")
    broker_name = rocketmq_info_dict.get("replica_name")
    replica_role = rocketmq_info_dict.get("replica_role").lower()

    # Logback configs whose log paths get rewritten further down.
    log_file_list = [
        f"{rocketmq_dir}/conf/logback_namesrv.xml",
        f"{rocketmq_dir}/conf/logback_broker.xml",
        f"{rocketmq_dir}/conf/logback_tools.xml"
    ]
    store_dir = {
        "storePathRootDir": f"{rocketmq_dir}/store",
        "storePathCommitLog": f"{rocketmq_dir}/store/commitlog",
        "storePathConsumerQueue": f"{rocketmq_dir}/store/consumequeue",
        "storePathIndex": f"{rocketmq_dir}/store/index"
    }

    namesrv_config_file = f"{rocketmq_dir}/conf/nameserver.conf"
    broker_config_file = f"{rocketmq_dir}/conf/broker.conf"
    # NOTE(review): replica_role values other than master/slave/dledger leave
    # broker_id/broker_mode_config unbound and would raise NameError below --
    # confirm the input is validated upstream.
    if replica_role == "master":
        broker_id = 0
        broker_role = "ASYNC_MASTER"
    elif replica_role == "slave":
        broker_id = 1
        broker_role = "SLAVE"
    elif replica_role == "dledger":
        dLegerSelfId = rocketmq_info_dict.get("dledger_id")
        dLegerPeers = ";".join(rocketmq_info_dict.get("members"))
        broker_config_file = f"{rocketmq_dir}/conf/dledger.conf"
        store_dir[
            "storeDledgerDir"] = f"{rocketmq_dir}/store/dledger-{dLegerSelfId}/data"

    # Configuration
    ## Create the storage directories
    for key in store_dir:
        try:
            dir_ = store_dir[key]
            log.logger.debug(f"建立目录: {dir_}")
            os.makedirs(dir_, exist_ok=1)
        except Exception as e:
            log.logger.error(f"无法建立{dir_}目录: {str(e)}")
            return error_code

    namesrv_mem = rocketmq_info_dict.get("namesrv_mem")
    broker_mem = rocketmq_info_dict.get("broker_mem")
    jvm_dict = {
        f"{rocketmq_dir}/bin/runserver.sh": namesrv_mem,
        f"{rocketmq_dir}/bin/runbroker.sh": broker_mem
    }
    ## Patch JVM memory in the launch scripts
    for jvm_file in jvm_dict:
        jvm_mem = jvm_dict[jvm_file]
        log.logger.debug(f"修改jvm: {jvm_file}:{jvm_mem}")
        config_jvm(jvm_file, jvm_mem, log)
    ## Point logback log dirs at the install dir instead of ${user.home}
    for log_file in log_file_list:
        command = f"sed -i 's#${{user.home}}#{rocketmq_dir}#' {log_file}"
        log.logger.debug(f"修改日志目录: {command}")
        result, msg = common.exec_command(command)
        if not result:
            log.logger.error(msg)
            return_value = error_code

    ## Write the config files
    ## Custom start script: the stock one cannot start in background remotely
    start_sh_text = f"""\
            #!/bin/bash
            # sky

            service_type=$1

            if [ "$service_type" == 'namesrv' ]; then
                nohup bash ./bin/mqnamesrv -c {namesrv_config_file} &> namesrv.log &
            elif [ "$service_type" == 'broker' ]; then
                nohup bash ./bin/mqbroker -c {broker_config_file} &> broker.log &
            else
                echo "Usage: bash $0 namesrv|broker"
            fi
    """
    start_sh_file = f"{rocketmq_dir}/bin/start.sh"

    namesrv_config_text = f"""\
            rocketmqHome={rocketmq_dir}
            listenPort={namesrv_port}
    """

    namesrv_list = ";".join(rocketmq_info_dict.get("namesrvs"))
    # Role-specific broker settings spliced into broker_config_text below.
    if replica_role == "master" or replica_role == "slave":
        broker_mode_config = f"""
                brokerId={broker_id}
                haListenPort={ha_port}
                deleteWhen=04
                fileReservedTime=48
                brokerRole={broker_role}
                flushDiskType=ASYNC_FLUSH
                """
    elif replica_role == "dledger":
        broker_mode_config = f"""
                enableDLegerCommitLog=true
                dLegerGroup={broker_name}
                dLegerSelfId={dLegerSelfId}
                dLegerPeers={dLegerPeers}
                sendMessageThreadPoolNums=4
                """

    broker_config_text = f"""\
                brokerClusterName={cluster_name}
                brokerName={broker_name}
                listenPort={remote_port}
                namesrvAddr={namesrv_list}

                {broker_mode_config}

                storePathRootDir={store_dir['storePathRootDir']}
                storePathCommitLog={store_dir['storePathCommitLog']}
                storePathConsumerQueue={store_dir['storePathConsumerQueue']}
                storePathIndex={store_dir['storePathIndex']}
                mapedFileSizeCommitLog=1G
        """

    rocketmq_sh_text = f"""\
            export ROCKETMQ_HOME={rocketmq_dir}
            export PATH=$ROCKETMQ_HOME/bin:$PATH
    """
    config_dict = {
        "namesrv_config": {
            "config_file": namesrv_config_file,
            "config_context": namesrv_config_text,
            "mode": "w"
        },
        "broker_config": {
            "config_file": broker_config_file,
            "config_context": broker_config_text,
            "mode": "w"
        },
        "rocketmq_sh": {
            "config_file": "/etc/profile.d/rocketmq.sh",
            "config_context": rocketmq_sh_text,
            "mode": "w"
        },
        "start_sh": {
            "config_file": start_sh_file,
            "config_context": start_sh_text,
            "mode": "w"
        }
    }
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
    result, msg = common.config(config_dict)
    if result:
        pass
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #11
0
def install():
    """Install and configure the dps (nginx-style) web server.

    Creates the service user, unpacks the package, assembles per-port
    server/upstream blocks from vhosts_info_dict into vhosts.conf, writes
    the main config, and validates the result with `dps-server -t`.

    Returns:
        normal_code (0) on success, error_code on failure.
    """
    return_value = 0
    pkg_file = conf_dict["pkg_file"]
    command = "id -u dps > /dev/null 2>&1 || useradd -r dps"
    log.logger.debug(f"创建用户: {command=}")
    result, msg = common.exec_command(command)
    if not result:
        log.logger.error(msg)
    value, msg = common.install(pkg_file, dps_src, dps_dst, dps_pkg_dir,
                                located)
    if not value:
        log.logger.error(msg)
        return error_code

    # Path of the vhosts config file
    vhosts_file = f"{dps_dir}/conf/vhosts.conf"
    try:
        server_config_list = []
        # One server block per listening port.
        for port in vhosts_info_dict:
            dps_server_config = f"""\
                    server {{
                        listen       {int(port)};
                        server_name  localhost;

                        #charset koi8-r;

                        access_log  logs/{port}.access.log  main;

                        error_page   500 502 503 504  /50x.html;
                        location = /50x.html {{
                            root   html;
                        }}

                        location = /favicon.ico {{
                            return 200;     		# 忽略浏览器的title前面的图标
                        }}
            """
            dps_server_config = textwrap.dedent(dps_server_config)
            for name in vhosts_info_dict[port]:
                mode = vhosts_info_dict[port][name]["mode"]
                if mode == "proxy":
                    # Build the upstream block for the proxied backends
                    proxy_name = vhosts_info_dict[port][name]["proxy_name"]
                    upstream_servers = f"upstream {proxy_name} {{"
                    for proxy_host in vhosts_info_dict[port][name][
                            "proxy_hosts"]:
                        upstream_servers = f"{upstream_servers}\n\tserver {proxy_host};"
                    else:
                        # for/else: always runs after the loop -- closes the block.
                        upstream_servers = f"{upstream_servers}\n}}"
                    dps_server_config = f"{upstream_servers}\n{dps_server_config}"

                    name_config = textwrap.indent(
                        textwrap.dedent(f"""\
                            location {name} {{
                                proxy_pass http://{proxy_name};
                            }}
                    """), "    ")
                elif mode == "location":
                    name_config = textwrap.indent(
                        textwrap.dedent(f"""\
                            location {name} {{
                                root {vhosts_info_dict[port][name]['frontend_dir']};
                            }}
                    """), "    ")
                # NOTE(review): any other mode leaves name_config unbound (or
                # stale from the previous iteration) -- confirm mode is
                # validated upstream.
                dps_server_config = f"{dps_server_config}\n{name_config}"
            else:
                # for/else: close the server block once all names are appended.
                dps_server_config = f"{dps_server_config}\n}}"
                server_config_list.append(dps_server_config)
    except Exception as e:
        log.logger.error(f"配置vhosts失败: {str(e)}")
        return error_code

    # Main configuration
    worker_processes = dps_info_dict.get("worker_processes")
    dps_conf_text = f"""\
            user  dps;
            worker_processes  {worker_processes};

            #error_log  logs/error.log;
            #error_log  logs/error.log  notice;
            error_log  logs/error.log  info;

            pid        logs/dps.pid;
            worker_rlimit_nofile 65535;

            events {{
                use epoll;
                worker_connections  65535;
            }}

            http {{
                include       mime.types;
                default_type  application/octet-stream;
                charset utf-8;

                log_format  main  '$remote_addr$server_port - $remote_user [$time_local] "$request" '
                                  '$status $body_bytes_sent "$http_referer" '
                                  '"$http_user_agent" "$http_x_forwarded_for"'
				  '"$upstream_addr" "$upstream_status" "$upstream_response_time" "$request_time"';

                #access_log  logs/access.log  main;

                server_tokens   off;
                server_names_hash_bucket_size 128;
                client_header_buffer_size 512k;
                large_client_header_buffers 4 512k;

                client_max_body_size 20m;
                sendfile        on;
                tcp_nopush     on;
                tcp_nodelay on;
                keepalive_timeout  65;

                # gzip
                gzip  on;
                gzip_min_length 1k;
                gzip_buffers 16 64k;
                #gzip_types text/plain application/x-javascript text/css application/xml;
                gzip_types text/plain application/javascript application/x-javascript text/css application/xml text/javascript application/x-httpd-php image/jpeg image/gif image/png;
                gzip_vary on;


                # 使客户端请求header中带有下划线的字段生效
                underscores_in_headers on;
                
                # proxy
                proxy_intercept_errors on;			# 启用error_page
                proxy_set_header Host $http_host; 
                proxy_set_header X-Real-IP $remote_addr;
                proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
                proxy_buffer_size 512k;			# userLogin接口, 用户header信息的缓冲区大小
                proxy_buffers 32 128k;
                
                #proxy_connect_timeout 3000;
                #proxy_read_timeout 600000;
                #proxy_send_timeout 600000;
                #open_file_cache max=204800 inactive=30s;
                #open_file_cache_min_uses 1;
                #open_file_cache_valid 50s;
                #send_timeout  60;
                #proxy_request_buffering off;

                include {vhosts_file};
            }}
            """
    dps_conf_file = f"{dps_dir}/conf/dps.conf"
    config_dict = {
        "dps_conf": {
            "config_file": dps_conf_file,
            "config_context": dps_conf_text,
            "mode": "w"
        },
        "vhosts_conf": {
            "config_file": vhosts_file,
            "config_context": "\n".join(server_config_list),
            "mode": "w"
        }
    }
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
    result, msg = common.config(config_dict)
    if result:
        # Syntax-check the generated configuration before reporting success.
        command = f"cd {dps_dir} && ./sbin/dps-server -t"
        log.logger.debug(f"检测配置文件: {command=}")
        result, msg = common.exec_command(command)
        if not result:
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #12
0
파일: nacos.py 프로젝트: xhsky/autodep
def install():
    """Install and configure nacos.

    Unpacks the package, writes application.properties (optionally adding
    a MySQL datasource section and a cluster member list) and adjusts JVM
    memory in startup.sh.

    Returns:
        normal_code on success, error_code on failure.
    """
    return_value = 0
    pkg_file = conf_dict["pkg_file"]
    value, msg = common.install(pkg_file, nacos_src, nacos_dst, nacos_pkg_dir,
                                located)
    if not value:
        log.logger.error(msg)
        return error_code
    #else:
    #    nacos_dst=nacos_src

    jvm_mem = nacos_info_dict["jvm_mem"]
    # NOTE(review): sed is invoked without -i here and below, so it prints the
    # substituted text to stdout rather than editing startup.sh in place --
    # confirm common.exec_command compensates for this.
    jvm_command = f"sed 's/-Xms512m -Xmx512m -Xmn256m/-Xms{jvm_mem} -Xmx{jvm_mem}/' {nacos_dir}/bin/startup.sh"

    nacos_conf_text = f"""\
            # Spring Boot 
            server.servlet.contextPath=/nacos
            server.port={web_port}

            # Network 
            nacos.inetutils.prefer-hostname-over-ip=True
            # nacos.inetutils.ip-address=

            # Connection pool 
            db.pool.config.connectionTimeout=30000
            db.pool.config.validationTimeout=10000
            db.pool.config.maximumPoolSize=20
            db.pool.config.minimumIdle=2

            nacos.naming.empty-service.auto-clean=true
            nacos.naming.empty-service.clean.initial-delay-ms=50000
            nacos.naming.empty-service.clean.period-time-ms=30000

            # Metrics 
            management.metrics.export.elastic.enabled=false
            management.metrics.export.influx.enabled=false

            # Access Log 
            server.tomcat.accesslog.enabled=true
            server.tomcat.accesslog.pattern=%h %l %u %t "%r" %s %b %D %{{User-Agent}}i %{{Request-Source}}i
            server.tomcat.basedir=

            # Access Control
            #spring.security.enabled=false
            nacos.security.ignore.urls=/,/error,/**/*.css,/**/*.js,/**/*.html,/**/*.map,/**/*.svg,/**/*.png,/**/*.ico,/console-ui/public/**,/v1/auth/**,/v1/console/health/**,/actuator/**,/v1/console/server/**
            nacos.core.auth.system.type=nacos
            nacos.core.auth.enabled=false
            nacos.core.auth.default.token.expire.seconds=18000
            nacos.core.auth.default.token.secret.key=SecretKey012345678901234567890123456789012345678901234567890123456789
            nacos.core.auth.caching.enabled=true
            nacos.core.auth.server.identity.key=111
            nacos.core.auth.server.identity.value=222

            nacos.istio.mcp.server.enabled=false
            """
    mode = nacos_info_dict["data_source"]["mode"]
    if mode == "mysql":
        db_host = nacos_info_dict["data_source"]["mysql_info"]["db_host"]
        db_port = nacos_info_dict["data_source"]["mysql_info"]["db_port"]
        db_name = nacos_info_dict["data_source"]["mysql_info"]["db_name"]
        db_user = nacos_info_dict["data_source"]["mysql_info"]["db_user"]
        db_password = nacos_info_dict["data_source"]["mysql_info"][
            "db_password"]
        mysql_conf_text = f"""\
            # Config Module
            spring.datasource.platform=mysql
            db.num=1
            db.url.0=jdbc:mysql://{db_host}:{db_port}/{db_name}?characterEncoding=utf8&connectTimeout=1000&socketTimeout=3000&autoReconnect=true&useUnicode=true&useSSL=false&serverTimezone=UTC
            db.user.0={db_user}
            db.password.0={db_password}
        """
        nacos_conf_text = f"{nacos_conf_text}\n{mysql_conf_text}"
        # NOTE(review): this sed pattern looks inverted -- it rewrites the
        # custom jvm_mem settings back to fixed 1g values; verify intent.
        jvm_command = f"sed 's/-server -Xms{jvm_mem} -Xmx{jvm_mem} -Xmn1g/-server -Xms1g -Xmx1g -Xmn256m/' {nacos_dir}/bin/startup.sh"
    nacos_conf_file = f"{nacos_dir}/conf/application.properties"
    config_dict = {
        "nacos_conf": {
            "config_file": nacos_conf_file,
            "config_context": nacos_conf_text,
            "mode": "w"
        }
    }

    # Cluster mode: also write the member list.
    if cluster_flag:
        cluster_info_text = "\n".join(cluster_info_dict["members"])
        cluster_conf_file = f"{nacos_dir}/conf/cluster.conf"
        config_dict["cluster_conf"] = {
            "config_file": cluster_conf_file,
            "config_context": cluster_info_text,
            "mode": "w"
        }
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
    result, msg = common.config(config_dict)
    if result:
        log.logger.debug("修改jvm")
        result, msg = common.exec_command(jvm_command)
        if result:
            return_value = normal_code
        else:
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #13
0
def install():
    """Install the backup tool and register its scheduled backup jobs.

    Unpacks the package, writes one JSON config file per backup keyword,
    builds a crontab line for each, then persists the current crontab
    snapshot plus the new entries via ``common.config``.

    Returns:
        ``normal_code`` on success, ``error_code`` on configuration failure.
        Exits the process with ``error_code`` if the package install fails.
    """
    located = conf_dict.get("located")
    pkg_file = conf_dict["pkg_file"]
    value, msg = common.install(pkg_file, backup_tool_src, backup_tool_dst,
                                backup_tool_pkg_dir, located)
    if not value:
        log.logger.error(msg)
        sys.exit(error_code)

    # Generate one config file (and one cron entry) per backup keyword.
    config_dict = {}
    crontab_str = ""
    type_key_str = "type"
    for i in backup_tool_info_dict:
        backup_type_dict = backup_tool_info_dict[i]
        for j in backup_type_dict:
            if j != type_key_str:
                log.logger.debug(f"生成{i}_{j}配置")
                # Propagate the backup type down into each keyword's config.
                backup_type_dict[j][type_key_str] = backup_type_dict[
                    type_key_str]
                backup_type_dict[j]["keyword"] = j
                file_name = f"{i}_{j}"
                config_file = f"{backup_tool_config_dir}/{file_name}.json"
                config_dict[file_name] = {
                    "config_file": config_file,
                    "config_context": backup_type_dict[j],
                    "mode": "w",
                    "type": "json"
                }
                timing = backup_type_dict[j]["timing"]
                crontab_str = f"{crontab_str}{timing} {remote_python_exec} {backup_tool_dir}/bin/backup.py {config_file}\n"

    # Write the collected cron entries to the crontab file.
    # NOTE: the original code put this in a `for ... else` clause; the loop
    # contains no `break`, so the else-block always ran — flattened here.
    log.logger.debug("生成crontab配置")
    crontab_list_command = "bash -lc 'crontab -l'"
    log.logger.debug(f"{crontab_list_command=}")
    result, msg = common.exec_command(crontab_list_command, timeout=5)
    if result:
        crontab_list_str = msg
    else:
        # "no crontab for root" simply means the crontab is empty, not an error.
        if msg.strip() == "no crontab for root":
            crontab_list_str = ""
        else:
            log.logger.error(msg)
            return error_code

    # First snapshot the existing crontab ("w"), then append the new
    # entries to that same file ("r+").
    config_dict["crontab_list"] = {
        "config_file": backup_crontab_file,
        "config_context": crontab_list_str,
        "mode": "w"
    }
    config_dict["crontab"] = {
        "config_file": backup_crontab_file,
        "config_context": crontab_str,
        "mode": "r+"
    }

    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
    result, msg = common.config(config_dict)
    if not result:
        log.logger.error(msg)
        return error_code
    return normal_code
예제 #14
0
파일: tomcat.py 프로젝트: xhsky/autodep
def main():
    """Entry point for the tomcat deployment script.

    Expects ``sys.argv == [prog, softname, action, conf_json]`` where
    ``action`` is one of install/run/start/stop. Exits with 0 on success,
    1 on error, 2 when the expected ports are not listening after start.
    """
    softname, action, conf_json = sys.argv[1:]
    conf_dict = json.loads(conf_json)
    located = conf_dict.get("located")

    log = common.Logger({"remote": log_remote_level}, loggger_name="tomcat")
    tomcat_dir = f"{located}/{tomcat_dst}"
    tomcat_info_dict = conf_dict["tomcat_info"]
    http_port = tomcat_info_dict["port"].get("http_port")
    shutdown_port = tomcat_info_dict["port"].get("shutdown_port")
    #ajp_port=tomcat_info_dict["port"].get("ajp_port")
    ajp_port = 8009  # AJP connector is commented out in server.xml; fixed value kept for the template
    port_list = [http_port, shutdown_port]

    flag = 0
    # Install
    if action == "install":
        pkg_file = conf_dict["pkg_file"]
        value, msg = common.install(pkg_file, tomcat_src, tomcat_dst,
                                    tomcat_pkg_dir, located)
        if not value:
            log.logger.error(msg)
            flag = 1
            sys.exit(flag)

        # Configure
        try:
            # Remove Tomcat's bundled default webapps
            log.logger.debug("删除默认程序")
            webapps_dir = f"{tomcat_dir}/webapps"
            for i in os.listdir(webapps_dir):
                shutil.rmtree(f"{webapps_dir}/{i}")
        except Exception as e:
            # Best-effort cleanup: a missing webapps dir is not fatal.
            log.logger.error(str(e))

        jvm_mem = tomcat_info_dict.get("jvm_mem")
        min_threads, max_threads = tomcat_info_dict.get("threads")
        max_connections = tomcat_info_dict.get("max_connections")

        tomcat_sh_context = f"""\
            export CATALINA_HOME={tomcat_dir}
            export PATH=$CATALINA_HOME/bin:$PATH
        """
        server_xml_context = f"""\
            <?xml version="1.0" encoding="UTF-8"?>
            <!--
              Licensed to the Apache Software Foundation (ASF) under one or more
              contributor license agreements.  See the NOTICE file distributed with
              this work for additional information regarding copyright ownership.
              The ASF licenses this file to You under the Apache License, Version 2.0
              (the "License"); you may not use this file except in compliance with
              the License.  You may obtain a copy of the License at

                  http://www.apache.org/licenses/LICENSE-2.0

              Unless required by applicable law or agreed to in writing, software
              distributed under the License is distributed on an "AS IS" BASIS,
              WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
              See the License for the specific language governing permissions and
              limitations under the License.
            -->
            <!-- Note:  A "Server" is not itself a "Container", so you may not
                 define subcomponents such as "Valves" at this level.
                 Documentation at /docs/config/server.html
             -->
            <Server port="{shutdown_port}" shutdown="SHUTDOWN">
              <Listener className="org.apache.catalina.startup.VersionLoggerListener" />
              <!-- Security listener. Documentation at /docs/config/listeners.html
              <Listener className="org.apache.catalina.security.SecurityListener" />
              -->
              <!--APR library loader. Documentation at /docs/apr.html -->
              <Listener className="org.apache.catalina.core.AprLifecycleListener" SSLEngine="on" />
              <!-- Prevent memory leaks due to use of particular java/javax APIs-->
              <Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
              <Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
              <Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />

              <!-- Global JNDI resources
                   Documentation at /docs/jndi-resources-howto.html
              -->
              <GlobalNamingResources>
                <!-- Editable user database that can also be used by
                     UserDatabaseRealm to authenticate users
                -->
                <Resource name="UserDatabase" auth="Container"
                          type="org.apache.catalina.UserDatabase"
                          description="User database that can be updated and saved"
                          factory="org.apache.catalina.users.MemoryUserDatabaseFactory"
                          pathname="conf/tomcat-users.xml" />
              </GlobalNamingResources>

              <!-- A "Service" is a collection of one or more "Connectors" that share
                   a single "Container" Note:  A "Service" is not itself a "Container",
                   so you may not define subcomponents such as "Valves" at this level.
                   Documentation at /docs/config/service.html
               -->
              <Service name="Catalina">

                <!--The connectors can use a shared executor, you can define one or more named thread pools-->
                <!--
                <Executor name="tomcatThreadPool" namePrefix="catalina-exec-"
                    maxThreads="150" minSpareThreads="4"/>
                -->


                <!-- A "Connector" represents an endpoint by which requests are received
                     and responses are returned. Documentation at :
                     Java HTTP Connector: /docs/config/http.html
                     Java AJP  Connector: /docs/config/ajp.html
                     APR (HTTP/AJP) Connector: /docs/apr.html
                     Define a non-SSL/TLS HTTP/1.1 Connector on port 8080
                -->
                <Connector port="{http_port}" protocol="HTTP/1.1"
                           maxHttpHeaderSize="8192"  
                           maxThreads="{max_threads}"  
                           minSpareThreads="{min_threads}"  
                           enableLookups="false"  
                           compression="on"  
                           compressionMinSize="2048"  
                           URIEncoding="utf-8"  
                           acceptCount="300"  
                           disableUploadTimeout="true"
                           maxConnections="{max_connections}"
                           connectionTimeout="20000"
                           redirectPort="8443" />
                <!-- A "Connector" using the shared thread pool-->
                <!--
                <Connector executor="tomcatThreadPool"
                           port="8080" protocol="HTTP/1.1"
                           connectionTimeout="20000"
                           redirectPort="8443" />
                -->
                <!-- Define an SSL/TLS HTTP/1.1 Connector on port 8443
                     This connector uses the NIO implementation. The default
                     SSLImplementation will depend on the presence of the APR/native
                     library and the useOpenSSL attribute of the
                     AprLifecycleListener.
                     Either JSSE or OpenSSL style configuration may be used regardless of
                     the SSLImplementation selected. JSSE style configuration is used below.
                -->
                <!--
                <Connector port="8443" protocol="org.apache.coyote.http11.Http11NioProtocol"
                           maxThreads="150" SSLEnabled="true">
                    <SSLHostConfig>
                        <Certificate certificateKeystoreFile="conf/localhost-rsa.jks"
                                     type="RSA" />
                    </SSLHostConfig>
                </Connector>
                -->
                <!-- Define an SSL/TLS HTTP/1.1 Connector on port 8443 with HTTP/2
                     This connector uses the APR/native implementation which always uses
                     OpenSSL for TLS.
                     Either JSSE or OpenSSL style configuration may be used. OpenSSL style
                     configuration is used below.
                -->
                <!--
                <Connector port="8443" protocol="org.apache.coyote.http11.Http11AprProtocol"
                           maxThreads="150" SSLEnabled="true" >
                    <UpgradeProtocol className="org.apache.coyote.http2.Http2Protocol" />
                    <SSLHostConfig>
                        <Certificate certificateKeyFile="conf/localhost-rsa-key.pem"
                                     certificateFile="conf/localhost-rsa-cert.pem"
                                     certificateChainFile="conf/localhost-rsa-chain.pem"
                                     type="RSA" />
                    </SSLHostConfig>
                </Connector>
                -->

                <!-- Define an AJP 1.3 Connector on port 8009 -->
                <!--
                <Connector protocol="AJP/1.3"
                           address="::1"
                           port="{ajp_port}"
                           redirectPort="8443" />
                -->

                <!-- An Engine represents the entry point (within Catalina) that processes
                     every request.  The Engine implementation for Tomcat stand alone
                     analyzes the HTTP headers included with the request, and passes them
                     on to the appropriate Host (virtual host).
                     Documentation at /docs/config/engine.html -->

                <!-- You should set jvmRoute to support load-balancing via AJP ie :
                <Engine name="Catalina" defaultHost="localhost" jvmRoute="jvm1">
                -->
                <Engine name="Catalina" defaultHost="localhost">

                  <!--For clustering, please take a look at documentation at:
                      /docs/cluster-howto.html  (simple how to)
                      /docs/config/cluster.html (reference documentation) -->
                  <!--
                  <Cluster className="org.apache.catalina.ha.tcp.SimpleTcpCluster"/>
                  -->

                  <!-- Use the LockOutRealm to prevent attempts to guess user passwords
                       via a brute-force attack -->
                  <Realm className="org.apache.catalina.realm.LockOutRealm">
                    <!-- This Realm uses the UserDatabase configured in the global JNDI
                         resources under the key "UserDatabase".  Any edits
                         that are performed against this UserDatabase are immediately
                         available for use by the Realm.  -->
                    <Realm className="org.apache.catalina.realm.UserDatabaseRealm"
                           resourceName="UserDatabase"/>
                  </Realm>

                  <Host name="localhost"  appBase="webapps"
                        unpackWARs="true" autoDeploy="true">

                    <!-- SingleSignOn valve, share authentication between web applications
                         Documentation at: /docs/config/valve.html -->
                    <!--
                    <Valve className="org.apache.catalina.authenticator.SingleSignOn" />
                    -->

                    <!-- Access log processes all example.
                         Documentation at: /docs/config/valve.html
                         Note: The pattern used is equivalent to using pattern="common" -->
                    <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs"
                           prefix="localhost_access_log" suffix=".txt"
                           pattern="%h %l %u %t &quot;%r&quot; %s %b" />

                  </Host>
                </Engine>
              </Service>
            </Server>
            """
        setenv_sh_context = f"""\
            #!/bin/bash
            # sky 

            JAVA_OPTS="-server -XX:+AggressiveOpts -XX:+UseBiasedLocking -XX:+DisableExplicitGC -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:+CMSParallelRemarkEnabled -XX:+UseFastAccessorMethods -XX:+UseCMSInitiatingOccupancyOnly -Djava.security.egd=file:/dev/./urandom -Djava.awt.headless=true"

            JAVA_OPTS="$JAVA_OPTS -Xms{jvm_mem} -Xmx{jvm_mem} -Xss512k -XX:LargePageSizeInBytes=128M -XX:MaxTenuringThreshold=11 -XX:MetaspaceSize=200m -XX:MaxMetaspaceSize=256m -XX:MaxNewSize=256m"

            UMASK=0022

            CATALINA_PID=$CATALINA_HOME/bin/catalina.pid
            """
        config_dict = {
            "server_xml": {
                "config_file": f"{tomcat_dir}/conf/server.xml",
                "config_context": server_xml_context,
                "mode": "w"
            },
            "setenv_sh": {
                "config_file": f"{tomcat_dir}/bin/setenv.sh",
                "config_context": setenv_sh_context,
                "mode": "w"
            },
            "tomcat_sh": {
                "config_file": "/etc/profile.d/tomcat.sh",
                "config_context": tomcat_sh_context,
                "mode": "w"
            }
        }

        log.logger.debug(f"写入配置文件: {json.dumps(config_dict)=}")
        result, msg = common.config(config_dict)
        if result:
            command = f"{tomcat_dir}/bin/catalina.sh configtest"
            log.logger.debug(f"配置文件检测: {command=}")
            status, result = common.exec_command(command)
            # Return code 32512 means the optional APR library is missing; ignore it.
            if status:
                if result.returncode != 0 and result.returncode != 32512:
                    log.logger.error(result.stderr)
                    flag = 1
            else:
                # Fix: on exec failure the message is in `result`; the original
                # logged the stale `msg` left over from common.config above.
                log.logger.error(result)
                flag = 1
        else:
            log.logger.error(msg)
            flag = 1

        sys.exit(flag)

    elif action == "run":
        command = f"set -m ; {tomcat_dir}/bin/catalina.sh start"
        log.logger.debug(f"{command=}")
        status, result = common.exec_command(command)
        if status:
            if result.returncode != 0:
                log.logger.error(result.stderr)
                flag = 1
            else:
                log.logger.debug(f"检测端口: {port_list=}")
                if not common.port_exist(port_list):
                    flag = 2  # started, but expected ports not listening
        else:
            log.logger.error(result)
            flag = 1

        sys.exit(flag)
    elif action == "start":
        pass
    elif action == "stop":
        pass
예제 #15
0
def install():
    """Install and configure a MySQL instance.

    Creates the service user, unpacks the package, prepares the data/log
    directory layout, and writes /etc/profile.d/mysql.sh plus /etc/my.cnf
    (and the replication include file when running as a cluster slave).

    Returns:
        ``normal_code`` on success, ``error_code`` on failure.
        Exits the process with ``error_code`` if the package install fails.
    """
    return_value = normal_code
    pkg_file = conf_dict["pkg_file"]
    command = f"id -u {mysql_user} > /dev/null 2>&1 || useradd -r -s /bin/false {mysql_user}"
    log.logger.debug(f"创建用户: {command=}")
    result, msg = common.exec_command(command)
    if not result:
        # User creation failure is recorded but installation still proceeds;
        # the error code is returned at the end.
        log.logger.error(msg)
        return_value = error_code

    value, msg = common.install(pkg_file, mysql_src, mysql_dst, mysql_pkg_dir,
                                located)
    if not value:
        log.logger.error(msg)
        sys.exit(error_code)

    # Configuration
    # NOTE: `\\cp` (shell `\cp`) bypasses any cp alias; the original source
    # used a bare `\c`, an invalid Python escape (SyntaxWarning on 3.12+).
    mk_dirs_commands = f"mkdir -p {mysql_dir}/{my_data} && mkdir -p {mysql_dir}/{my_logs}/binlog && mkdir -p {mysql_dir}/{my_logs} && mkdir -p {mysql_dir}/{my_logs}/redolog && mkdir -p {mysql_dir}/{my_logs}/undolog && mkdir -p {mysql_dir}/{my_logs}/relay && chown -R {mysql_user}:{mysql_user} {located}/{mysql_src}* && ln -snf {located}/{mysql_src}* /usr/local/mysql && \\cp -f {mysql_dir}/support-files/mysql.server /etc/init.d/mysqld && systemctl daemon-reload"
    log.logger.debug(f"建立目录, 授权: {mk_dirs_commands=}")
    result, msg = common.exec_command(mk_dirs_commands)
    if not result:
        log.logger.error(msg)
        return error_code

    mem = db_info_dict.get("innodb_mem")
    server_id = db_info_dict.get("server_id")
    max_connections = db_info_dict.get("max_connections")

    mysql_sh_context = f"""\
        export MySQL_HOME={mysql_dir}
        export PATH=$MySQL_HOME/bin:$PATH
    """
    my_cnf_context = f"""\
        [mysqld]
        # dir
        datadir={mysql_dir}/{my_data}
        #secure_file_priv=/var/lib/mysql-files
        pid_file={mysql_dir}/{my_data}/mysqld.pid

        # network
        #socket=/tmp/mysql.sock
        port={mysql_port}
        max_connections={max_connections}

        # general set
        lower_case_table_names=1
        default_authentication_plugin=mysql_native_password
        default-time-zone='+08:00'
        wait_timeout=600
        default_password_lifetime=90

        # Log 
        ## Error Log
        log_error={mysql_dir}/{my_logs}/mysqld.log
        log_timestamps=system
        ## Slow log
        log_output=file
        slow_query_log=1
        long_query_time=2

        # bin log
        server_id={server_id}
        log_bin={mysql_dir}/{my_logs}/binlog/binlog
        binlog_format=row
        binlog_row_event_max_size=8192
        binlog_checksum=crc32
        max_binlog_size=512M

        binlog_cache_size=128K
        binlog_stmt_cache_size=32K
        max_binlog_cache_size=8G
        max_binlog_stmt_cache_size=2G

        binlog_error_action=abort_server
        binlog_expire_logs_seconds=0

        sync_binlog=1
        binlog_group_commit_sync_delay=0

        default_storage_engine=innodb

        # innodb
        gtid_mode=on
        enforce_gtid_consistency=1
        ## buffer pool
        innodb_buffer_pool_size={mem}
        innodb_change_buffer_max_size=25
        innodb_buffer_pool_instances=16

        ## redo log
        innodb_log_group_home_dir={mysql_dir}/{my_logs}/redolog
        innodb_log_file_size=256M
        innodb_log_files_in_group=4

        ## log buffer
        innodb_log_buffer_size=16M
        innodb_flush_log_at_trx_commit=1

        ## tablespace
        ### system tablespace
        innodb_file_per_table=1
        ### undo tablespace
        innodb_undo_directory={mysql_dir}/{my_logs}/undolog
        innodb_rollback_segments=128
        innodb_max_undo_log_size=1G

        !include {my_plugin_cnf_file}
        !include {my_client_cnf_file}

        log_replica_updates=1
        [client]
    """
    my_cnf_file = "/etc/my.cnf"

    config_dict = {
        "mysql_sh": {
            "config_file": "/etc/profile.d/mysql.sh",
            "config_context": mysql_sh_context,
            "mode": "w"
        },
        "my_cnf": {
            "config_file": my_cnf_file,
            "config_context": my_cnf_context,
            "mode": "w"
        }
    }

    # Slave (replica) configuration
    if cluster_flag:
        if role == "slave":
            my_client_cnf = f"""\
                [mysqld]
                #replication
                ## master
                ## slave
                ### relay log
                relay_log={mysql_dir}/{my_logs}/relay/relay
            """
            sync_dbs_list = cluster_info_dict.get("sync_dbs")
            sync_dbs_config = ""
            for sync_db in sync_dbs_list:
                sync_dbs_config = f"{sync_dbs_config}\nreplicate_do_db={sync_db}"
            config_dict.update({
                "my_client_cnf": {
                    "config_file": my_client_cnf_file,
                    "config_context": my_client_cnf,
                    "mode": "w"
                },
                "my_sync_db_cnf": {
                    "config_file": my_client_cnf_file,
                    "config_context": sync_dbs_config,
                    "mode": "a"
                }
            })
    else:
        # Stand-alone: the include file must still exist, so write it empty.
        config_dict.update({
            "my_client_cnf": {
                "config_file": my_client_cnf_file,
                "config_context": "",
                "mode": "w"
            }
        })
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
    result, msg = common.config(config_dict)
    if not result:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #16
0
파일: redis5.py 프로젝트: xhsky/autodep
def install():
    """Install and configure a Redis 5 instance.

    Unpacks the package, writes kernel/profile settings, generates
    redis.conf according to the stand-alone/master/slave role (and
    sentinel.conf when sentinel mode is enabled), then applies the sysctl
    settings and disables transparent hugepages.

    Returns:
        ``normal_code`` on success, ``error_code`` on failure.
        Exits the process with ``error_code`` if the package install fails.
    """
    return_value = normal_code
    pkg_file = conf_dict["pkg_file"]
    value, msg = common.install(pkg_file, redis_src, redis_dst, None, located)
    if not value:
        log.logger.error(msg)
        sys.exit(error_code)

    redis_mem = redis_info_dict["db_info"].get("redis_mem")
    # Environment configuration
    log.logger.debug("环境配置")
    sysctl_conf_file = "/etc/sysctl.d/redis.conf"
    sysctl_conf_text = """\
            net.core.somaxconn=2048
            vm.overcommit_memory=1
    """
    redis_sh_text = f"""\
            export REDIS_HOME={redis_dir}
            export PATH=$REDIS_HOME/bin:$PATH
    """
    hugepage_disabled = "echo never > /sys/kernel/mm/transparent_hugepage/enabled\n"
    config_dict = {
        "sysctl_conf": {
            "config_file": sysctl_conf_file,
            "config_context": sysctl_conf_text,
            "mode": "w"
        },
        "rc_local": {
            "config_file": "/etc/rc.local",
            "config_context": hugepage_disabled,
            "mode": "r+"
        },
        "redis_sh": {
            "config_file": "/etc/profile.d/redis.sh",
            "config_context": redis_sh_text,
            "mode": "w"
        }
    }

    # Redis configuration: build redis.conf according to the replication role.
    log.logger.debug("配置redis")
    if redis_info_dict.get("cluster_info") is None:
        role = "stand-alone"
    else:
        cluster_info_dict = redis_info_dict["cluster_info"]
        role = cluster_info_dict.get("role")
    log.logger.debug(f"{role=}")

    # NOTE(review): an unrecognized role leaves slaveof_master_port unbound
    # and would raise NameError below — assumes upstream validates the role.
    if role == "stand-alone" or role == "master":
        slaveof_master_port = ""
    elif role == "slave":
        master_host = cluster_info_dict.get("master_host")
        master_port = cluster_info_dict.get("master_port")
        slaveof_master_port = f"slaveof {master_host} {master_port}"
    log.logger.debug(f"{slaveof_master_port=}")

    redis_conf_text = f"""\
            protected-mode no
            port {redis_port}
            tcp-backlog 511
            timeout 0
            tcp-keepalive 300
            daemonize yes
            supervised no
            pidfile "{redis_dir}/redis.pid"
            loglevel notice
            logfile "{redis_dir}/logs/redis.log"
            # syslog-enabled no
            # syslog-ident redis
            # syslog-facility local0
            databases 16
            always-show-logo yes

            save 900 1
            save 300 10
            save 60 10000

            stop-writes-on-bgsave-error no
            rdbcompression yes
            rdbchecksum yes

            dbfilename "dump.rdb"
            dir "{redis_dir}/data"

            {slaveof_master_port}

            masterauth "{password}"
            requirepass "{password}"
            replica-serve-stale-data yes

            replica-read-only yes

            repl-diskless-sync no

            repl-diskless-sync-delay 5
            # repl-ping-slave-period 10
            # repl-timeout 60
            repl-disable-tcp-nodelay no
            # repl-backlog-size 1mb
            # repl-backlog-ttl 3600
            replica-priority 100

            # maxclients 10000
            maxmemory {redis_mem}
            # maxmemory-policy noeviction

            lazyfree-lazy-eviction no
            lazyfree-lazy-expire no
            lazyfree-lazy-server-del no
            replica-lazy-flush no

            appendonly no
            appendfilename "appendonly.aof"
            appendfsync everysec
            # appendfsync no
            no-appendfsync-on-rewrite no
            auto-aof-rewrite-percentage 100
            auto-aof-rewrite-min-size 64mb
            aof-load-truncated yes
            aof-use-rdb-preamble no

            lua-time-limit 5000

            # cluster-enabled yes
            # cluster-config-file nodes-6379.conf
            # cluster-node-timeout 15000
            # cluster-slave-validity-factor 10
            # cluster-migration-barrier 1
            # cluster-require-full-coverage yes

            slowlog-max-len 128
            latency-monitor-threshold 0

            hash-max-ziplist-entries 512
            hash-max-ziplist-value 64

            list-max-ziplist-size -2
            list-compress-depth 0
            set-max-intset-entries 512
            zset-max-ziplist-entries 128
            zset-max-ziplist-value 64

            hll-sparse-max-bytes 3000
            activerehashing yes
            client-output-buffer-limit normal 0 0 0
            client-output-buffer-limit replica 256mb 64mb 60
            client-output-buffer-limit pubsub 32mb 8mb 60
            hz 10
            aof-rewrite-incremental-fsync yes
            """
    config_dict.update({
        "redis_conf": {
            "config_file": f"{redis_dir}/conf/redis.conf",
            "config_context": redis_conf_text,
            "mode": "w"
        }
    })

    # Sentinel configuration
    if sentinel_flag:
        log.logger.debug("配置sentinel")
        #sentinel_port=sentinel_info.get("sentinel_port")
        monitor_host = sentinel_info.get("monitor_host")
        monitor_port = sentinel_info.get("monitor_port")

        sentinel_conf_text = f"""\
                protected-mode no
                port {sentinel_port}
                daemonize yes
                dir "{redis_dir}/data"
                logfile "{redis_dir}/logs/sentinel.log"
                sentinel monitor mymaster {monitor_host} {monitor_port} 1
                sentinel auth-pass mymaster {password}
                sentinel deny-scripts-reconfig yes
                sentinel down-after-milliseconds mymaster 5000
        """
        config_dict.update({
            "sentinel_conf": {
                "config_file": f"{redis_dir}/conf/sentinel.conf",
                "config_context": sentinel_conf_text,
                "mode": "w"
            }
        })

    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
    result, msg = common.config(config_dict)
    if result:
        command = f"sysctl -p {sysctl_conf_file} && echo never > /sys/kernel/mm/transparent_hugepage/enabled"
        log.logger.debug(f"刷新配置: {command=}")
        # Fix: the original bound the whole (ok, msg) tuple to `result`,
        # which is always truthy, so failures were silently ignored.
        result, msg = common.exec_command(command)
        if not result:
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #17
0
파일: redis6.py 프로젝트: xhsky/autodep
def install():
    """Install and configure redis (and optionally sentinel).

    Unpacks the package, writes kernel/profile settings, renders
    redis.conf according to the role (stand-alone / master / slave) and,
    when ``sentinel_flag`` is set, sentinel.conf, then applies the
    sysctl and transparent-hugepage settings.

    Returns:
        normal_code on success, error_code on failure.  Exits the
        process with error_code if the package itself cannot be
        installed.
    """
    return_value = normal_code
    pkg_file = conf_dict["pkg_file"]
    value, msg = common.install(pkg_file, redis_src, redis_dst, None, located)
    if not value:
        log.logger.error(msg)
        sys.exit(error_code)

    redis_mem = redis_info_dict["db_info"].get("redis_mem")
    # Kernel / shell environment configuration
    log.logger.debug("环境配置")
    sysctl_conf_file = "/etc/sysctl.d/redis.conf"
    sysctl_conf_text = """\
            net.core.somaxconn=2048
            vm.overcommit_memory=1
    """
    redis_sh_text = f"""\
            export DCH_HOME={redis_dir}
            export PATH=$DCH_HOME/bin:$PATH
    """
    # Plain string: no placeholders, so the f-prefix was unnecessary
    hugepage_disabled = "echo never > /sys/kernel/mm/transparent_hugepage/enabled\n"
    config_dict = {
        "sysctl_conf": {
            "config_file": sysctl_conf_file,
            "config_context": sysctl_conf_text,
            "mode": "w"
        },
        "rc_local": {
            "config_file": "/etc/rc.local",
            "config_context": hugepage_disabled,
            "mode": "r+"
        },
        "redis_sh": {
            "config_file": "/etc/profile.d/redis.sh",
            "config_context": redis_sh_text,
            "mode": "w"
        }
    }

    # redis configuration: render redis.conf according to the
    # master/slave topology
    log.logger.debug("配置redis")
    if redis_info_dict.get("cluster_info") is None:
        role = "stand-alone"
    else:
        cluster_info_dict = redis_info_dict["cluster_info"]
        role = cluster_info_dict.get("role")
    log.logger.debug(f"{role=}")

    # NOTE(review): a role other than stand-alone/master/slave leaves
    # slaveof_master_port unbound (NameError below) — confirm upstream
    # validation guarantees one of these three values.
    if role == "stand-alone" or role == "master":
        slaveof_master_port = ""
    elif role == "slave":
        master_host = cluster_info_dict.get("master_host")
        master_port = cluster_info_dict.get("master_port")
        slaveof_master_port = f"replicaof {master_host} {master_port}"
    log.logger.debug(f"{slaveof_master_port=}")

    redis_io_threads = redis_info_dict["db_info"]["redis_io_threads"]
    redis_conf_text = f"""\
            # NETWORK
            bind 0.0.0.0
            protected-mode yes
            port {redis_port}
            tcp-backlog 511
            ## unixsocket /tmp/redis.sock
            ## unixsocketperm 700
            timeout 0
            tcp-keepalive 300

            # GENERAL
            daemonize yes
            supervised no
            pidfile {redis_dir}/redis.pid
            databases 16
            always-show-logo yes

            # log
            loglevel notice
            logfile "{redis_dir}/logs/redis.log"
            ## syslog-enabled no
            ## syslog-ident redis
            ## syslog-facility local0

            slowlog-log-slower-than 10000
            slowlog-max-len 128

            # SNAPSHOTTING
            dir {redis_dir}/data

            save 900 1
            save 300 10
            save 60 10000

            stop-writes-on-bgsave-error yes
            rdbcompression yes
            rdbchecksum yes
            dbfilename dump.rdb
            rdb-del-sync-files no

            # REPLICATION 
            # repl-timeout 60
            # master
            repl-diskless-sync no
            repl-diskless-sync-delay 5
            repl-disable-tcp-nodelay yes
            repl-backlog-size 1mb
            repl-backlog-ttl 3600

            # slave
            {slaveof_master_port}
            masterauth {redis_password}
            # masteruser <username>
            # repl-ping-replica-period 10
            repl-diskless-load disabled
            replica-serve-stale-data yes
            replica-read-only yes
            replica-priority 100
            # min-replicas-to-write 3
            # min-replicas-max-lag 10
            # replica-announce-ip 5.5.5.5
            # replica-announce-port 1234
            # replica-ignore-maxmemory yes


            # KEYS TRACKING
            # tracking-table-max-keys 1000000

            # SECURITY 
            acllog-max-len 128
            # aclfile /dream/redis/conf/users.acl
            requirepass {redis_password}

            # CLIENTS 
            # maxclients 10000

            # MEMORY MANAGEMENT	
            maxmemory {redis_mem}
            # maxmemory-policy noeviction
            # maxmemory-samples 5
            # active-expire-effort 1

            # LAZY FREEING 
            lazyfree-lazy-eviction no
            lazyfree-lazy-expire no
            lazyfree-lazy-server-del no
            replica-lazy-flush no
            lazyfree-lazy-user-del no

            # THREADED I/O
            io-threads {redis_io_threads}
            io-threads-do-reads no 
            # server_cpulist 0-7:2	
            # bio_cpulist 1,3	
            # aof_rewrite_cpulist 8-11
            # bgsave_cpulist 1,10-11

            # KERNEL OOM CONTROL  
            oom-score-adj no		
            oom-score-adj-values 0 200 800

            # APPEND ONLY MODE
            appendonly no
            appendfilename "appendonly.aof"
            appendfsync everysec
            no-appendfsync-on-rewrite no
            auto-aof-rewrite-percentage 100
            auto-aof-rewrite-min-size 64mb
            aof-load-truncated yes
            aof-use-rdb-preamble yes

            # LUA SCRIPTING
            lua-time-limit 5000

            # DCH CLUSTER
            # cluster-enabled yes
            # cluster-config-file nodes-6379.conf
            # cluster-node-timeout 15000
            # cluster-replica-validity-factor 10
            # cluster-migration-barrier 1
            # cluster-require-full-coverage yes
            # cluster-replica-no-failover no
            # cluster-allow-reads-when-down no
            # cluster-announce-ip 10.1.1.5
            # cluster-announce-port 6379
            # cluster-announce-bus-port 6380

            # LATENCY MONITOR
            latency-monitor-threshold 0

            # EVENT NOTIFICATION
            notify-keyspace-events ""       
            # GOPHER SERVER
            # gopher-enabled no             

            # ADVANCED CONFIG 
            hash-max-ziplist-entries 512
            hash-max-ziplist-value 64
            list-max-ziplist-size -2
            list-compress-depth 0
            set-max-intset-entries 512
            zset-max-ziplist-entries 128
            zset-max-ziplist-value 64
            hll-sparse-max-bytes 3000
            stream-node-max-bytes 4096
            stream-node-max-entries 100
            activerehashing yes
            client-output-buffer-limit normal 0 0 0
            client-output-buffer-limit replica 256mb 64mb 60
            client-output-buffer-limit pubsub 32mb 8mb 60
            # client-query-buffer-limit 1gb
            # proto-max-bulk-len 512mb
            hz 10
            dynamic-hz yes
            aof-rewrite-incremental-fsync yes
            rdb-save-incremental-fsync yes
            # lfu-log-factor 10
            # lfu-decay-time 1

            # ACTIVE DEFRAGMENTATION
            # activedefrag no
            # active-defrag-ignore-bytes 100mb
            # active-defrag-threshold-lower 10
            # active-defrag-threshold-upper 100
            # active-defrag-cycle-min 1
            # active-defrag-cycle-max 25
            # active-defrag-max-scan-fields 1000
            jemalloc-bg-thread yes
            ignore-warnings ARM64-COW-BUG
            """
    config_dict.update({
        "redis_conf": {
            "config_file": f"{redis_dir}/conf/redis.conf",
            "config_context": redis_conf_text,
            "mode": "w"
        }
    })

    # Sentinel configuration
    if sentinel_flag:
        log.logger.debug("配置sentinel")
        monitor_host = sentinel_info.get("monitor_host")
        monitor_port = sentinel_info.get("monitor_port")
        replicas_num = len(sentinel_info.get("replicas_members"))

        # quorum must be an integer; the old `replicas_num / 2` yielded a
        # float (e.g. 2.0) that sentinel rejects in the rendered config.
        if replicas_num <= 2:
            quorum = 1
        elif (replicas_num % 2) == 0:
            quorum = replicas_num // 2
        else:
            quorum = (replicas_num // 2) + 1
        sentinel_conf_text = f"""\
                protected-mode no
                port {sentinel_port}
                daemonize yes
                dir "{redis_dir}/data"
                logfile "{redis_dir}/logs/sentinel.log"
                pidfile {redis_dir}/sentinel.pid

                {sentinel_password_str}
                #sentinel sentinel-user <username>
                #sentinel sentinel-pass <password>

                sentinel monitor mymaster {monitor_host} {monitor_port} {quorum}
                sentinel auth-pass mymaster {redis_password}
                sentinel deny-scripts-reconfig yes
                sentinel down-after-milliseconds mymaster 5000
                sentinel failover-timeout mymaster 180000

                sentinel resolve-hostnames yes
                sentinel announce-hostnames no
        """
        config_dict.update({
            "sentinel_conf": {
                "config_file": f"{redis_dir}/conf/sentinel.conf",
                "config_context": sentinel_conf_text,
                "mode": "w"
            }
        })

    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
    result, msg = common.config(config_dict)
    if result:
        command = f"sysctl -p {sysctl_conf_file} && echo never > /sys/kernel/mm/transparent_hugepage/enabled"
        log.logger.debug(f"刷新配置: {command=}")
        # exec_command returns (result, msg); the old code bound the whole
        # tuple to `result`, which is always truthy, so failures were
        # silently ignored and the stale `msg` from common.config would
        # have been logged.
        result, msg = common.exec_command(command)
        if not result:
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value
예제 #18
0
파일: glusterfs.py 프로젝트: xhsky/autodep
def install():
    """Install and configure glusterfs (server or client).

    server_flag == 1: write the glusterd volume config and start glusterd.
    server_flag == 2: create the mount point and append the fstab entry
    (if not already present).

    Returns:
        normal_code on success, error_code on failure.
    """
    pkg_file = conf_dict["pkg_file"]
    value, msg = common.install(pkg_file, glusterfs_src, glusterfs_dst,
                                pkg_dir, located)
    if not value:
        log.logger.error(msg)
        return error_code

    # Default to an empty config set so an unexpected server_flag value
    # no longer raises NameError when config_dict is used below.
    config_dict = {}
    if server_flag == 1:
        glusterd_conf_context = f"""\
                volume management
                    type mgmt/glusterd
                    option working-directory /var/lib/glusterd
                    option transport-type socket,rdma
                    option transport.socket.keepalive-time 10
                    option transport.socket.keepalive-interval 2
                    option transport.socket.read-fail-log off
                    option transport.socket.listen-port {glusterd_port}
                    option transport.rdma.listen-port 24008
                    option ping-timeout 0
                    option event-threads 1
                #   option lock-timer 180
                #   option transport.address-family inet6
                    option base-port {volume_port}
                    option max-port  60999
                end-volume
                """
        glusterd_conf_file = "/etc/glusterfs/glusterd.vol"
        config_dict = {
            "glusterd_conf": {
                "config_file": glusterd_conf_file,
                "config_context": glusterd_conf_context,
                "mode": "w"
            }
        }
    elif server_flag == 2:
        try:
            log.logger.debug(f"创建挂载目录: {mounted_dir}")
            os.makedirs(mounted_dir, exist_ok=True)

            # Append the fstab entry only if it is not already present,
            # so repeated installs stay idempotent.
            mounted_str = f"{mounted_host}:{glusterfs_volume_name} {mounted_dir} glusterfs defaults 0 0\n"
            fstab_file = "/etc/fstab"
            with open(fstab_file, "r") as f:
                text = f.readlines()
            if mounted_str not in text:
                config_dict = {
                    "gluster_client_conf": {
                        "config_file": fstab_file,
                        "config_context": mounted_str,
                        "mode": "a"
                    }
                }
        except Exception as e:
            log.logger.error(str(e))
            return error_code

    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
    result, msg = common.config(config_dict)
    if result:
        if server_flag == 1:  # the server must be started ahead of time
            return glusterd_start()
    else:
        log.logger.error(msg)
        return error_code
    # Client install succeeded: return an explicit success code instead
    # of falling off the end with None, matching the sibling installers.
    return normal_code
예제 #19
0
def install():
    """Install and configure elasticsearch.

    Creates the `elastic` user, unpacks the package, writes the ES/JVM
    configuration plus ulimit and sysctl settings, pins ES_JAVA_HOME to
    the bundled JDK, and finally fixes ownership and applies sysctl.

    Returns:
        normal_code on success, error_code on failure.
    """
    return_value = normal_code
    pkg_file = conf_dict["pkg_file"]
    command = "id -u elastic > /dev/null 2>&1 || useradd -m -s /bin/bash elastic"
    log.logger.debug(f"创建用户: {command=}")
    result, msg = common.exec_command(command)
    if not result:
        log.logger.error(msg)
        return error_code
    value, msg = common.install(pkg_file, elasticsearch_src, elasticsearch_dst,
                                elasticsearch_pkg_dir, located)
    if not value:
        log.logger.error(msg)
        return error_code

    # Configuration
    ## elasticsearch.yml
    jvm_mem = conf_dict["elasticsearch_info"]["jvm_mem"]
    cluster_name = conf_dict["elasticsearch_info"]["cluster_name"]
    members_list = conf_dict["elasticsearch_info"]["members"]

    # NOTE(review): the stray '"' in the commented node.name line below is
    # harmless (YAML comment) but looks like a typo — confirm before fixing,
    # since this text is written verbatim to elasticsearch.yml.
    es_config_text = f"""\
        cluster.name: {cluster_name}
        #"node.name: es_node
        node.master: true
        node.voting_only: false
        node.data: true
        node.ingest: true
        bootstrap.memory_lock: true
        network.host: 0.0.0.0
        http.port: {http_port}
        discovery.seed_hosts: {members_list}
        transport.tcp.port: {transport}
        cluster.initial_master_nodes: {members_list}
        gateway.recover_after_nodes: 1
        action.destructive_requires_name: true
    """

    # JVM heap goes into a jvm.options.d drop-in file instead of patching
    # the stock jvm.options in place (the drop-in survives ES upgrades).
    jvm_config_file = f"{es_dir}/config/jvm.options.d/jvm_mem.options"
    jvm_context = f"""\
            -Xms{jvm_mem}
            -Xmx{jvm_mem}
    """

    # Write ES's bundled java into the env script to avoid clashing with
    # any JAVA_HOME set elsewhere on the host.
    add_java_file = f"{es_dir}/bin/elasticsearch-env"
    with open(add_java_file, "r+") as f:
        raw_text = f.readlines()
        java_home = f"export ES_JAVA_HOME={es_dir}/jdk\n"
        # Inserted as the third line of the script (after shebang/header);
        # rewriting from the start is safe because the file only grows.
        raw_text.insert(2, java_home)
        f.seek(0)
        f.writelines(raw_text)

    ## Host environment: ulimits for the elastic user and vm.max_map_count
    limit_conf_context = """\
        elastic    -   memlock unlimited
        elastic    -   fsize   unlimited
        elastic    -   as  unlimited
        elastic    -   nofile  65536
        elastic    -   nproc   65536
        """
    sysctl_conf_context = "vm.max_map_count=262144"
    sysctl_conf_file = "/etc/sysctl.d/es.conf"

    config_dict = {
        "jvm": {
            "config_file": jvm_config_file,
            "config_context": jvm_context,
            "mode": "w"
        },
        "limit_conf": {
            "config_file": "/etc/security/limits.d/elastic.conf",
            "config_context": limit_conf_context,
            "mode": "w"
        },
        "sysctl_conf": {
            "config_file": sysctl_conf_file,
            "config_context": sysctl_conf_context,
            "mode": "w"
        },
        "es_config_text": {
            "config_file": f"{es_dir}/config/elasticsearch.yml",
            "config_context": es_config_text,
            "mode": "w"
        }
    }
    log.logger.debug(f"写入配置文件: {json.dumps(config_dict)}")
    result, msg = common.config(config_dict)
    if result:
        command = f"chown -R elastic:elastic {located}/{elasticsearch_src}*  && sysctl -p {sysctl_conf_file}"
        log.logger.debug(f"配置环境: {command=}")
        result, msg = common.exec_command(command)
        if not result:
            log.logger.error(msg)
            return_value = error_code
    else:
        log.logger.error(msg)
        return_value = error_code
    return return_value