def start_check(self):
    """Poll the remote host until the service port is listening.

    Probes with lsof once per second, up to 40 attempts.  Returns True as
    soon as a LISTEN socket on ``ser_port`` is seen (or immediately when no
    port is configured for this module); returns False on timeout.
    """
    if ser_port == "":
        # No port configured for this module -- nothing to check.
        return True
    for i in range(40):
        rcmd = "sudo /usr/sbin/lsof -i:%s | grep -i LISTEN" % ser_port
        out = run_command(rcmd)
        if out == "":
            time.sleep(1)
            continue
        else:
            logger_root.info("[%s] 服务起来啦!" % self.host)
            return True
    # Fix: the original fell off the end (implicit None) after 40 failed
    # probes, silently.  Log the timeout and return False explicitly --
    # None and False are both falsy, so callers see the same truthiness.
    logger_root.error("[%s] port %s still not listening after 40 attempts" % (self.host, ser_port))
    return False
def check_status(self):  # Check whether the application reported errors after startup.
    """Verify the application started cleanly.

    Docker deploys (docker_flag "1"/"2"): poll inside the container (up to
    15 tries) for today's catalina log and then for a 'Server startup'
    line; returns True/False accordingly.
    Bare-metal deploys: inspect the tomcat/jar logs for errors; these
    branches fall through with an implicit None, and unknown module types
    return 1.
    """
    logger_root.info("执行check_status函数!")
    if docker_flag == "1" or docker_flag == "2":
        for i in range(15):
            # Has today's catalina.<date>.log appeared inside the container yet?
            rcmd = '''sudo sh -c "docker exec -i %s ls /usr/local/tomcat/logs/|grep catalina.`date "+%%Y-%%m-%%d"`.log"|grep -v old''' % mod_name
            logger_root.info(rcmd)
            out = run_command(rcmd)
            if out != "":
                # Log exists -- look for Tomcat's 'Server startup' marker.
                rcmd = '''sudo sh -c " docker exec -i %s tail -n 10 /usr/local/tomcat/logs/catalina.`date "+%%Y-%%m-%%d"`.log|grep 'Server startup'" ''' % mod_name
                logger_root.info(rcmd)
                out = run_command(rcmd)
                if out == "":
                    if i == 14:
                        # Last attempt exhausted without the startup marker.
                        logger_root.error("[%s] 启动失败,未检测到'Server startup'" % self.host)
                        return False
                    time.sleep(10)
                else:
                    return True
            else:
                if i == 14:
                    # Log file never appeared at all.
                    logger_root.error("[%s] 启动失败,未生成日志文件catalina" % self.host)
                    return False
                time.sleep(5)
    else:
        # NOTE(review): 'type' is a module-level variable holding the
        # deployment type (java/war/jar/...); it shadows the builtin.
        if type == "java" or type == "war":
            self.webapp = cf.get(self.mod_name, "tomcat_path")
            logger_root.info(self.webapp)
            rcmd = "ps aux|grep %s|grep -v grep|awk '{print $2}'" % (self.webapp)
            out = run_command(rcmd)
            if out != '':
                # Busy-wait remotely until 'Server startup' appears, echoing any
                # matching lines seen meanwhile.
                # NOTE(review): plain grep treats 'Exception|error' literally;
                # alternation needs grep -E / egrep -- confirm intent.
                rcmd = '''while :; do tail -n 10 %s/logs/catalina.out|grep -i -A20 'Exception|error'; tail -n 10 %s/logs/catalina.out |grep 'Server startup' && exit; done ''' % (self.webapp, self.webapp)
                outlog = run_command(rcmd)
                logger_root.info(outlog)
        elif type == "jar":
            rcmd = "ps aux|grep %s|grep -v grep|awk '{print $2}'" % (self.mod_name)
            out = run_command(rcmd)
            if out != '':
                # NOTE(review): uses self.webapp, which is only set on the
                # java/war path -- may be stale/unset here; confirm.
                rcmd = '''tail -n 2000 %s/logs/err|egrep -i -A50 -B30 'Exception|error' ''' % (self.webapp)
                outlog = run_command(rcmd)
                logger_root.error(outlog)
                logger_console.error(outlog)
        elif type == "nodejs":
            pass
        else:
            return 1
def restart(self):
    """Restart the module: `docker restart` for containerized deploys,
    otherwise a plain stop + start cycle."""
    if not auto:
        return
    if docker_flag == "1":
        cmd = "sudo docker restart %s" % mod_name
        logger_root.info(cmd)
        run_command(cmd)
    else:
        self.stop_program()
        self.start_program()
def check_server(self):
    """Health-check the deployed module.

    mysql/redis get dedicated probes; gxb-sso is checked via the login API
    (falling back to the generic status probe when login fails, matching
    the original short-circuit behaviour); everything else uses the
    generic status probe.  Failures are logged to both root and console.
    """
    if not auto:
        return
    logger_root.info("*" * 10)
    if mod_name == "common":
        check_all_server()
    ok_msg = "[%s] API 调用成功!" % self.host
    fail_msg = "[%s] API 调用不成功!" % self.host
    if mod_name in ("mysql", "redis"):
        logger_root.info("[%s] 开始检测!" % self.host)
        if mod_name == "mysql":
            check_mod.check_mysql()
        else:
            check_mod.check_redis()
    elif mod_name == "gxb-sso" and check_mod.check_login():
        logger_root.info(ok_msg)
    elif check_mod.check_status():
        logger_root.info(ok_msg)
    else:
        logger_root.error(fail_msg)
        logger_console.error(fail_msg)
def run_command(cmd, user=user, port=port, password=password, host=host, stdout="stdout"):
    """Run *cmd* on *host* over SSH and return its stdout (bytes).

    The password is written to stdin to satisfy sudo prompts; stderr is
    logged.  When the ``stdout`` flag is left at "stdout" the output is
    additionally logged at info level.

    Fixes over the original:
    - stderr.read() was called twice (the second read always returned ""),
      so the console logger never saw the error output -- read once.
    - the ``stdout`` parameter was shadowed by the channel object returned
      by exec_command, so the function always returned the output and
      ``client.close()`` was unreachable (leaked SSH connections).  The
      always-return behaviour is preserved (callers rely on it); the
      client is now closed in a finally block.
    """
    logger_root.info('start exec command %s' % cmd)
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.load_system_host_keys()
    port = int(port)
    client.connect(hostname=host, port=port, username=user, password=password, timeout=10)
    try:
        stdin, out_channel, err_channel = client.exec_command(cmd)
        # Feed the password for sudo prompts; harmless for plain commands.
        stdin.write("%s\n" % password)
        stdin.flush()
        err_data = err_channel.read()  # read once -- a second read() returns ""
        logger_root.error(err_data)
        logger_console.error(err_data)
        out_data = out_channel.read()
        if stdout == "stdout":
            logger_root.info(out_data)
        return out_data
    finally:
        client.close()
def backup(self):
    """Pull the remote module directory into the local backup area via
    rsync (logs excluded), then exit the process."""
    logger_root.info("backup start")
    if not os.path.exists(local_backup_file_prefix):
        os.makedirs(local_backup_file_prefix)
    cmd = "rsync -e 'ssh -p %s' -avz --exclude=logs/ %s@%s:%s %s" % (
        self.port, self.user, self.host, remote_dst_file, local_backup_file_prefix)
    logger_root.info(cmd)
    # pexpect answers the ssh password / host-key prompts automatically.
    output = pexpect.run(cmd,
                         events={'(?i)password': self.password + '\n',
                                 'continue connecting (yes/no)?': 'yes\n'},
                         timeout=None)
    logger_root.info(output)
    logger_root.info("%s backup successful!" % self.mod_name)
    sys.exit()
def check_start_status(self):
    """Post-start verification: port + log check, then up to five API probes.

    On success (outside gray updates, scheduler excluded) the host is
    un-commented in the nginx config so it rejoins the upstream; any
    failure is logged to both root and console loggers.
    """
    if self.start_check() and self.check_status():
        time.sleep(3)
        # Probe the application API up to 5 times, 10s apart.
        for i in range(5):
            if mod_name == "gxb-sso" and check_mod.check_login():
                logger_root.info("[%s] API 调用成功!" % self.host)
                start_flag = True
                break
            elif check_mod.check_status():
                logger_root.info("[%s] API 调用成功!" % self.host)
                start_flag = True
                break
            else:
                start_flag = False
                time.sleep(10)
                continue
        if not start_flag:
            logger_root.error("[%s] API 调用不成功!" % self.host)
            logger_console.error("[%s] API 调用不成功!" % self.host)
        if action != "gray_update" and start_flag:
            if mod_name != "gxb-scheduler":
                # Un-comment this host in nginx now that it is healthy.
                logger_root.info("[%s]执行解注释函数!" % self.host)
                nginx_mod.dec(self.host)
    else:
        logger_root.error("[%s]未检测到程序端口[%s]或者API调用失败,程序启动失败!" % (self.host, ser_port))
        logger_console.error("[%s]未检测到程序端口[%s]或者API调用失败,程序启动失败!" % (self.host, ser_port))
def stop_program(self):
    """Stop the application on the remote host.

    First comments this host out of the nginx upstream (so traffic
    drains), waits 40s, then kills the process according to the module
    type.  Returns 1 for unknown types.
    """
    # Comment this host out in the nginx config before stopping it.
    logger_root.info("[%s]执行加注释函数!" % self.host)
    nginx_mod.add(self.host)
    time.sleep(40)  # let in-flight requests drain
    logger_root.info('start sleep')
    # Stop the application.
    if stop_cmd:
        rcmd = stop_cmd
    # NOTE(review): the type-specific assignments below overwrite rcmd even
    # when stop_cmd was set -- stop_cmd only survives for unknown types;
    # confirm this is intended.
    if type == "java" or type == "war":
        self.webapp = cf.get(self.mod_name, "tomcat_path")
        # Kill tomcat and clear its work dir so stale compiled JSPs are gone.
        rcmd = '''pid=`ps aux|grep %s|grep -v grep|awk '{print $2}'`;[ -n "$pid" ] && kill -9 $pid ; rm -rf %s/work/Catalina/''' % (self.webapp, self.webapp)
    elif type == "jar":
        rcmd = '''pid=`ps aux|grep %s|grep -v grep|awk '{print $2}'`;[ -n "$pid" ] && kill -9 $pid ''' % (self.mod_name)
    elif type == "c":
        # C binaries are named after the last path component of the deploy dir.
        self.pname = remote_dst_file.split("/")[-1]
        rcmd = '''pid=`ps aux|grep %s|grep -v grep|awk '{print $2}'`;[ -n "$pid" ] && kill -9 $pid ''' % self.pname
    elif type == "php":
        rcmd = '''sh /home/kkb/start.sh'''
    elif type == "nodejs":
        # Kill whichever .js entry point was most recently modified.
        rcmd = '''ps aux|grep $(ls -rt %s/*.js|awk -F'/' '{print $NF}')|grep -v grep |awk '{print $2}'|xargs -I A kill -9 A ''' % remote_dst_file
    else:
        return 1
    logger_root.info(rcmd)
    run_command(rcmd)
def git_mod(self):
    """Build/upload the module's war package on the configured git host via
    fabric; exits the process when git_ip is missing or the build fails."""
    section = self.mod_name
    if not cf.has_option(section, "git_ip"):
        msg = "必须设置本地仓库机器的ip!"
        logger_root.error(msg)
        logger_console.error(msg)
        sys.exit()
    repo_host = cf.get(section, "git_ip")
    fab_cmd = 'fab -H %s -f %slib/fabfile.py go:%s' % (repo_host, src_dir_prefix, section)
    rc, fab_output = commands.getstatusoutput(fab_cmd)
    if log_detail == "True":
        logger_root.info(fab_output)
        logger_root.info("#" * 30)
    if rc != 0:
        err = "模块%s上传war包失败!" % mod_name
        logger_root.error(err)
        logger_console.error(err)
        sys.exit()
    logger_root.info("模块%s上传war包成功!" % mod_name)
def find_file_replace(self):
    """Find files named ``file_name`` under the module's tomcat path and
    replace ``old_content`` with ``new_content`` in each via sed, then
    restart the module.  Pre-replacement grep matches are collected into
    grep_list and logged at the end."""
    if auto:
        grep_list = []
        rootdir = cf.get(self.mod_name, "tomcat_path")
        rcmd = "find %s -name %s -type f" % (rootdir, file_name)
        out = run_command(rcmd)
        if out == "":
            sys.exit("dont find files!")
        FileList = out.split("\n")
        logger_root.info("filename: %s" % str(FileList))
        for filename in FileList:
            if filename != "":
                # Record the current matches before rewriting the file.
                rcmd = "grep %s %s" % (old_content, filename)
                grep_out = run_command(rcmd)
                if grep_out != "":
                    grep_list.append(grep_out)
                # ';' is the sed delimiter so '/' may appear in the values.
                rcmd = "sed -i 's;%s;%s;g' %s" % (old_content, new_content, filename)
                logger_root.info(rcmd)
                run_command(rcmd)
        self.stop_program()
        self.start_program()
        logger_root.info(grep_list)
def rollback(self, version=version):
    """Roll the module back to *version*.

    Docker/k8s deploys delegate to their rollback helpers.  Bare-metal:
    when no version is given, the second-newest entry in the local backup
    directory is used; the program is stopped, the backed-up tree rsynced
    to the remote host, and the program started again.
    """
    logger_root.info("start rollback")
    if docker_flag == "1":
        container_run.rollback_func(version, docker_flag)
    elif docker_flag == "2":
        k8s_container.rollback_func(version, docker_flag)
    else:
        if not version:
            # No version specified: pick the most recent previous backup
            # (second-to-last by mtime; the last one is the current release).
            local_backup_mod_dir = local_backup_dir_prefix + mod_name + "/"
            cmd = '''ls -rt %s|tail -2|head -1''' % local_backup_mod_dir
            version = os.popen(cmd).read().rstrip()
        # Directory to roll back from.
        self.back_dir = local_backup_dir_prefix + mod_name + "/" + version + "/"
        self.stop_program()
        rcmd = "rsync -e 'ssh -p %s' -avz --exclude-from=%s %s %s@%s:%s" % (self.port, exclude_file, self.back_dir + mod_name + "/", self.user, self.host, remote_dst_file + "/")
        logger_root.info(rcmd)
        outfile = pexpect.run(rcmd, events={'(?i)password': self.password + '\n', 'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
        logger_root.info(outfile)
        self.start_program()
def scp_source_package_to_local(self):
    """One-key update: copy the module package/directory from the configured
    source host into the local upload area.

    Skipped entirely when a local copy already exists (unzipped upload
    dir, jar in the upload dir, or the backup dir).  Requires
    source_host/source_path/source_user/source_password in the module's
    config section (source_port defaults to 22); exits when missing.
    """
    self.is_compress = 'False'
    logger_root.info("scp_source_package_to_local")
    # A local package already exists -- no need to fetch from the source host.
    if os.path.exists("%s" % upload_unzip_dir) or os.path.exists("%s" % upload_dir + mod_name + ".jar") or os.path.exists(local_backup_file_prefix):
        return 0
    # Read the source-server settings.
    if cf.has_option(mod_name, 'source_host') and cf.has_option(mod_name, 'source_path') and cf.has_option(mod_name, 'source_user') and cf.has_option(mod_name, 'source_password'):
        if cf.has_option(mod_name, 'source_port'):
            source_port = cf.get(mod_name, 'source_port')
        else:
            source_port = 22
        source_host = cf.get(mod_name, 'source_host')
        source_user = cf.get(mod_name, 'source_user')
        source_password = cf.get(mod_name, 'source_password')
        source_path = cf.get(mod_name, 'source_path')
        # jar/war: copy only the most recently modified package from source_host.
        if type == "jar" or type == "war":
            cmd = "cd %s;echo $(ls -rt *.%s|tail -1)" % (source_path, type)
            filename = run_command(cmd, user=source_user, port=source_port, password=source_password, host=source_host, stdout="file")
            source_path = cf.get(mod_name, 'source_path') + filename
            backup_cmd = "scp -q -P%s -r %s@%s:%s %s" % (source_port, source_user, source_host, source_path, upload_dir + mod_name + "." + type)
        # Otherwise: rsync the whole module directory (logs excluded).
        else:
            source_path = cf.get(mod_name, 'source_path')
            backup_cmd = "rsync -q -e 'ssh -p %s' -avz --exclude=logs/ --exclude=log/ %s@%s:%s %s" % (source_port, source_user, source_host, source_path + "/", upload_unzip_dir)
        logger_root.info(backup_cmd)
        try:
            outfile = pexpect.run(backup_cmd, events={'(?i)password': source_password + '\n', 'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
            logger_root.info(outfile)
        except Exception as e:
            print e
    else:
        logger_root.error("You want make it auto update ,Make sure you define source_host/source_path/source_user/source_password")
        logger_console.error("You want make it auto update ,Make sure you define source_host/source_path/source_user/source_password")
        sys.exit()
def update(self):  # Sync the new release to the remote server(s).
    """Full release of the module.

    docker_flag "1"/"2": drain nginx, build an image, (re)run the
    container, record the version, health-check and confirm.
    docker_flag "3": build an image and roll out via k8s.
    Otherwise: rsync the package to the remote host, patch /etc/hosts on
    canary hosts back to the production backends, and restart.

    NOTE(review): indentation in this body is reconstructed from a
    whitespace-mangled source; the nesting of image_func()/container_func()
    mirrors the parallel gray_update() implementation -- confirm.
    """
    if auto:
        if docker_flag == "1" or docker_flag == "2":
            # Drain this host from nginx first (scheduler has no upstream).
            if mod_name != "gxb-scheduler":
                logger_root.info("[%s]执行加注释函数!" % self.host)
                nginx_mod.add(self.host)
                time.sleep(2)
            if docker_git == "1":
                ver = container_run.image2_func()
            elif docker_git == "2":
                if not os.path.exists(local_backup_file_prefix):
                    self.git_mod()
                self.mv_upload_file_to_backup_dir()
                if image_flag == 0:
                    # Ship the package to the docker build host first.
                    remote_dst = docker_path + "/" + mod_name
                    rcmd = "rsync -e 'ssh -p %s' -avz %s %s@%s:%s" % (
                        docker_port, local_backup_file_prefix, docker_user, docker_ip, remote_dst + "/")
                    logger_root.info(rcmd)
                    outfile = pexpect.run(rcmd, events={'(?i)password': docker_pwd + '\n',
                                                        'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
                ver = image.image_func()
            container_run.container_func(ver)
            # Record the deployed image version for later reference.
            if show_flag:
                ver_path = src_dir_prefix + "/log/version.txt"
                with open(ver_path, "a+") as f:
                    f.write("%s:%s\n" % (self.mod_name, ver))
            if type == "war":
                self.check_start_status()
            self.confirm()
        elif docker_flag == "3":
            if docker_git == "1":
                ver = k8s_container.k8s_image_func()
            elif docker_git == "2":
                if not os.path.exists(local_backup_file_prefix):
                    self.git_mod()
                self.mv_upload_file_to_backup_dir()
                if image_flag == 0:
                    remote_dst = docker_path + "/" + mod_name
                    rcmd = "rsync -e 'ssh -p %s' -avz %s %s@%s:%s" % (
                        docker_port, local_backup_file_prefix, docker_user, docker_ip, remote_dst + "/")
                    logger_root.info(rcmd)
                    outfile = pexpect.run(rcmd, events={'(?i)password': docker_pwd + '\n',
                                                        'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
                ver = image.image_func()
            k8s_container.k8s_func(ver)
        else:
            # Bare-metal deploy over rsync.
            if git_enabled == "yes":
                if not os.path.exists(local_backup_file_prefix):
                    self.git_mod()
            else:
                self.scp_source_package_to_local()
            self.mv_upload_file_to_backup_dir()
            logger_root.info('start stop program')
            self.stop_program()
            logger_root.info('stop program ok')
            rcmd = '[ -d %s ] || mkdir -p %s' % (remote_dst_file, remote_dst_file)
            logger_root.info(rcmd)
            run_command(rcmd)
            rcmd = "rsync -e 'ssh -p %s' -avz --exclude-from=%s %s %s@%s:%s" % (self.port, exclude_file, local_backup_file_prefix, self.user, self.host, remote_dst_file + "/")
            logger_root.info(rcmd)
            outfile = pexpect.run(rcmd, events={'(?i)password': self.password + '\n', 'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
            self.confirm(remote_dst_file)
            # Restore production /etc/hosts entries on the canary hosts.
            # Intermediate rcmd values are executed inline; the LAST
            # assignment in each branch is executed by the trailing block.
            if mod_name == "gxb-web" and self.host == "web1":
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*api/100.98.139.47\tapi/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i '/cas.gaoxiaobang.com/d' /etc/hosts"
            elif mod_name == "cms-web" and self.host == "cms1":
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*cms-api/100.98.139.47\tcms-api/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i '/cas.gaoxiaobang.com/d' /etc/hosts"
            elif mod_name == "cms-user" and self.host == "user1":
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*cms-api/100.98.139.47\tcms-api/g' /etc/hosts"
            elif mod_name == "hybird-web" and self.host == "hybird1":
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*api/100.98.139.47\tapi/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*cms-api/100.98.139.47\tcms-api/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*app/100.98.139.47\tapp/g' /etc/hosts"
            elif mod_name == "wechat" and self.host == "chat1":
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*api/100.98.139.47\tapi/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*app/100.98.139.47\tapp/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*cms-api/100.98.139.47\tcms-api/g' /etc/hosts"
            elif mod_name == "bi-web" and self.host == "bi1":
                rcmd = "sudo sed -i 's/10.44.145.219[ \t]*bi-api/100.98.139.47\tbi-api/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
                rcmd = "sudo sed -i '/cas.gaoxiaobang.com/d' /etc/hosts"
            else:
                rcmd = ""
            logger_root.info(rcmd)
            if rcmd != "":
                run_command(rcmd)
            self.start_program()
# ---- module-level deployment bootstrap ----
# Point the file log handler at this module's log file and rewrite the
# logging config before the project loggers are imported.
cp.set("handler_filehander","args",('/home/update/log/%s.log' % mod_name, 'a'))
with open(log_conf, "w") as _log_conf_fp:  # fix: close the config file handle
    cp.write(_log_conf_fp)
from lib.log import logger_root,logger_console
from lib.addserver import AddServer
from lib.nginx import nginx
from lib.docker import docker
from lib.check_status import CheckStatus,check_all_server
# Refuse to run when another update.py for the same module+action is active.
cmd="ps aux|grep update.py |grep %s|grep %s|grep -v grep|wc -l" % (mod_name,action)
out=subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE,stderr=subprocess.PIPE)
if int(out.stdout.read()) > 1:
    logger_console.error("[%s]进程id已经存在,请不要重复[%s]!如需了解详情,请查看日志!" % (mod_name,action))
    logger_root.error("[%s]进程id已经存在,请不要重复[%s]!如需了解详情,请查看日志!" % (mod_name,action))
    sys.exit(0)
logger_root.info("开始发版!!!!!")
# Abort when neither a module name nor an action was supplied.
# NOTE(review): with `or`, supplying only one of the two still passes;
# the original comment suggests both are required -- confirm intent.
if mod_name or action:
    pass
else:
    logger_root.error('''you don't have mod_name and action!\nuse -h get some help''')
    # fix: was `logger_logger_console`, a NameError on this error path
    logger_console.error('''you don't have mod_name and action!\nuse -h get some help''')
    sys.exit()
# Unknown module: report the valid module list and exit.
if not cf.has_section(mod_name):
    logger_root.error("mod_name %s not in mod_list\nmod_list must in \n %s \n\n see %s get more information" % (mod_name,cf.sections(),mod_file))
    logger_console.error("mod_name %s not in mod_list\nmod_list must in \n %s \n\n see %s get more information" % (mod_name,cf.sections(),mod_file))
    sys.exit()
def mv_upload_file_to_backup_dir(self):
    """Move the uploaded package (archive, jar/war, or plain directory) from
    the upload area into the local backup/sync directory, unpacking archives.

    Returns 0 early when the backup dir already holds an update package;
    exits the process when no usable package can be found.

    Fixes over the original:
    - three ``logger_root.info("chdir", x)`` calls passed a positional arg
      to %%-style logging with no placeholder, raising a formatting error
      inside logging and losing the message -- now pre-formatted;
    - one info call logged a raw "mv ..." template with no values;
    - the nodejs path left ``java_file`` unbound (NameError) -- it now
      defaults to "." so the not-found path reports cleanly.
    """
    logger_root.info("mv_upload_file_to_backup_dir %s" % self.host)
    # Nothing to do when a package is already staged in the backup dir.
    if os.path.exists(local_backup_file_prefix):
        return 0
    else:
        os.path.exists(local_backup_file_prefix) or os.makedirs(local_backup_file_prefix)
    if self.is_compress == 'True':
        if os.path.exists("%s" % self.upload_file_prefix + ".tar.gz") or os.path.exists("%s" % self.upload_file_prefix + ".zip"):
            # Archive present: move it into the backup dir, then unpack it.
            if type == "java":
                os.path.exists(local_backup_dir) or os.makedirs(local_backup_dir)
                logger_root.info("chdir %s" % local_backup_dir)  # fix: logging args bug
                os.chdir(local_backup_dir)
                logger_root.info('mv %s.tar.gz %s 2>/dev/null||mv %s.zip %s 2>/dev/null' % (self.upload_file_prefix, local_backup_dir, self.upload_file_prefix, local_backup_dir))
                os.system("mv %s.tar.gz %s 2>/dev/null||mv %s.zip %s 2>/dev/null " % (self.upload_file_prefix, local_backup_dir, self.upload_file_prefix, local_backup_dir))
            elif type == "jar" or type == "war":
                os.chdir(local_backup_file_prefix)
                logger_root.info("chdir %s" % local_backup_dir)  # fix: logging args bug
                os.system("mv %s %s" % (upload_dir + mod_name + "." + type, local_backup_file_prefix))
                logger_root.info("mv %s %s" % (upload_dir + mod_name + "." + type, local_backup_file_prefix))
            elif type == "c" or type == "php" or type == "nodejs":
                os.path.exists(self.local_backup_dir) or os.makedirs(self.local_backup_dir)
                os.chdir(self.local_backup_dir)
                os.system("mv %s.tar.gz %s 2>/dev/null||mv %s.zip %s 2>/dev/null " % (self.upload_file_prefix, self.local_backup_dir, self.upload_file_prefix, self.local_backup_dir))
                # fix: the original logged the raw template with no values
                logger_root.info("mv %s.tar.gz %s 2>/dev/null||mv %s.zip %s 2>/dev/null" % (self.upload_file_prefix, self.local_backup_dir, self.upload_file_prefix, self.local_backup_dir))
            else:
                logger_root.error("mod_type error")
                logger_console.error("mod_type error")
                sys.exit()
            os.chdir(local_backup_dir)
            logger_root.info("tar xzf %s.tar.gz 2> /dev/null||unzip %s.zip 2>/dev/null" % (mod_name, mod_name))
            os.system("tar xzf %s.tar.gz 2> /dev/null||unzip %s.zip >/dev/null 2>&1" % (self.mod_name, self.mod_name))
            os.system("rm -f %s.tar.gz 2>/dev/null;rm -f %s.zip >/dev/null 2>&1" % (self.mod_name, self.mod_name))
            logger_root.info("rm -f %s.tar.gz 2>/dev/null;rm -f %s.zip 2>/dev/null" % (mod_name, mod_name))
            if type == "c":
                # The C archive unpacks into a subdirectory; flatten it.
                os.system("[ -d %s ] && mv %s/* ./ && rmdir %s" % (self.mod_name, self.mod_name, self.mod_name))
        else:
            logger_root.error("You compress flag is True,but your " + upload_dir + "can't find " + self.mod_name + ".zip or " + self.mod_name + ".tar.gz")
            logger_console.error("You compress flag is True,but your " + upload_dir + "can't find " + self.mod_name + ".zip or " + self.mod_name + ".tar.gz")
            sys.exit()
    elif type == "jar" or type == "war" or type == "nodejs":
        # No archive: look for a bare jar/war package in the upload dir.
        os.chdir(local_backup_file_prefix)
        logger_root.info("chdir %s" % local_backup_dir)  # fix: logging args bug
        if type == "war":
            java_file = "."
        elif type == "jar":
            java_file = "-1.0-SNAPSHOT."
        else:
            # fix: 'nodejs' previously fell through with java_file unbound.
            java_file = "."
        if os.path.exists("%s" % upload_dir + mod_name + java_file + type):
            logger_root.info("mv %s %s" % (upload_dir + mod_name + java_file + type, local_backup_file_prefix))
            os.system("mv %s %s" % (upload_dir + mod_name + java_file + type, local_backup_file_prefix))
        else:
            logger_root.error(upload_dir + " can't find " + self.mod_name + java_file + type)
            logger_console.error(upload_dir + " can't find " + self.mod_name + java_file + type)
            sys.exit()
    else:
        # No archive flag: expect an unpacked module directory.
        if os.path.exists("%s" % upload_unzip_dir):
            os.system("mv %s %s" % (upload_unzip_dir, self.local_backup_dir))
        else:
            logger_root.error("You compress flag is False,But " + upload_dir + " can't find " + self.mod_name + " directory")
            logger_console.error("You compress flag is False,But " + upload_dir + " can't find " + self.mod_name + " directory")
            sys.exit()
def gray_update(self):
    """Gray (canary) release: deploy to the gray host only.

    docker_flag "1": build image + run container on the docker host, then
    health-check and confirm; "2": build image + roll out via k8s;
    otherwise rsync the package to the remote host, point its /etc/hosts
    entries at the gray backends, and restart.

    Fixes over the original:
    - ``if docker_flag == "2": ... else:`` followed the "1" branch, so a
      "1" deploy also executed the bare-metal else-branch; changed to
      ``elif`` to match update()'s structure.
    - ``global ver`` was declared twice, after assignments to ver, which
      is a SyntaxWarning; consolidated into a single declaration.
    """
    global ver
    if auto:
        if docker_flag == "1":
            if not os.path.exists(local_backup_file_prefix):
                self.git_mod()
            self.mv_upload_file_to_backup_dir()
            if image_flag == 0:
                # Ship the package to the docker build host first.
                remote_dst = docker_path + "/" + mod_name
                rcmd = "rsync -e 'ssh -p %s' -avz %s %s@%s:%s" % (
                    docker_port, local_backup_file_prefix, docker_user, docker_ip, remote_dst + "/")
                logger_root.info(rcmd)
                outfile = pexpect.run(rcmd, events={'(?i)password': docker_pwd + '\n',
                                                    'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
            ver = image.image_func()
            container_run.container_func(ver)
            self.check_start_status()
            self.confirm()
        elif docker_flag == "2":
            if not os.path.exists(local_backup_file_prefix):
                self.git_mod()
            self.mv_upload_file_to_backup_dir()
            if image_flag == 0:
                remote_dst = docker_path + "/" + mod_name
                rcmd = "rsync -e 'ssh -p %s' -avz %s %s@%s:%s" % (
                    docker_port, local_backup_file_prefix, docker_user, docker_ip, remote_dst + "/")
                logger_root.info(rcmd)
                outfile = pexpect.run(rcmd, events={'(?i)password': docker_pwd + '\n',
                                                    'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
            ver = image.image_func()
            k8s_container.k8s_func(ver)
        else:
            # Bare-metal gray deploy over rsync.
            if git_enabled == "yes":
                if not os.path.exists(local_backup_file_prefix):
                    self.git_mod()
            else:
                self.scp_source_package_to_local()
            self.mv_upload_file_to_backup_dir()
            self.stop_program()
            rcmd = '[ -d %s ] || mkdir -p %s' % (remote_dst_file, remote_dst_file)
            run_command(rcmd)
            rcmd = "rsync -e 'ssh -p %s' -avz --exclude-from=%s %s %s@%s:%s" % (
                self.port, exclude_file, local_backup_file_prefix, self.user, self.host, remote_dst_file + "/")
            logger_root.info(rcmd)
            outfile = pexpect.run(rcmd, events={'(?i)password': self.password + '\n',
                                                'continue connecting (yes/no)?': 'yes\n'}, timeout=None)
            self.confirm(remote_dst_file)
            # Point the gray host's /etc/hosts entries at the gray backends.
            if mod_name == "gxb-web" or mod_name == "hybird-web" or mod_name == "cms-web" or mod_name == "wechat":
                rcmd = "sudo sed -i 's/100.98.139.47[ \t]*api/10.44.145.219\tapi/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
            if mod_name == "cms-web" or mod_name == "cms-user" or mod_name == "wechat" or mod_name == "hybird-web":
                rcmd = "sudo sed -i 's/100.98.139.47[ \t]*cms-api/10.44.145.219\tcms-api/g' /etc/hosts"
                run_command(rcmd)
                logger_root.info(rcmd)
            if mod_name == "hybird-web" or mod_name == "wechat":
                rcmd = "sudo sed -i 's/100.98.139.47[ \t]*app/10.44.145.219\tapp/g' /etc/hosts"
                logger_root.info(rcmd)
                run_command(rcmd)
            if mod_name == "gxb-web" or mod_name == "cms-web" or mod_name == "bi-web":
                rcmd = "sudo sed -i '$a 10.44.145.219\tcas.gaoxiaobang.com' /etc/hosts"
                logger_root.info(rcmd)
                run_command(rcmd)
            if mod_name == "bi-web":
                rcmd = "sudo sed -i 's/100.98.139.47[ \t]*bi-api/10.44.145.219\tbi-api/g' /etc/hosts"
                logger_root.info(rcmd)
                run_command(rcmd)
            self.start_program()
def run(self):
    """Worker thread: pull (dir, url) items off the class queue and download
    each to tmp_dir in 4KB chunks, verifying length/ETag before moving the
    file into place.

    NOTE(review): indentation below is reconstructed from a whitespace-
    mangled source; the lock handling around the queue read in particular
    should be confirmed against the original (as written, an empty queue
    appears to leave queue_lock held and reprocess the previous q_set).
    """
    logger_root.debug('Starting ' + self.name)
    global tmp_dir
    global queue_lock
    global error_list
    global succeed_list
    global download_list
    global exitFlag
    global err_exit
    while True:
        queue_lock.acquire()
        if not DownloadThread.__queue.empty():
            try:
                self.q_set = DownloadThread.__queue.get(block=False)
            except:
                # Queue drained between the empty() check and get(): stop.
                queue_lock.release()
                break
            else:
                queue_lock.release()
        # print self.q_set  # dump the queue entry
        self.dir = self.q_set[0]
        self.url = self.q_set[1]
        fname = os.path.basename(self.url)
        if self.url in download_list:
            # download_list (not a tmp-file existence check) is the
            # authoritative record of in-flight downloads.
            logger_root.warning('%s duplicate download items %s.' % (self.name, self.url))
        elif not os.path.exists(self.dir + fname):
            queue_lock.acquire()
            download_list.add(self.url)
            queue_lock.release()
            logger_root.info('%s start download %s.' % (self.name, self.url))
            try:
                host = urllib2.urlparse.urlparse(self.url).netloc
                headers = {'Host': host,
                           'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.101 Safari/537.36',
                           'Accept': '*/*',
                           'Connection': 'keep-alive'}
                req = urllib2.Request(self.url, headers=headers)
                handle = urllib2.urlopen(req, timeout=120)
                etag = handle.headers['etag'].strip('"')
                s_length = int(handle.headers["Content-Length"].strip('"'))
                d_length = 0
                with open(tmp_dir + fname, 'wb') as f_handler:
                    # Stream in 4KB chunks, tracking bytes written against
                    # the advertised Content-Length.
                    while True:
                        if exitFlag:
                            raise KeyboardInterrupt
                        buf = 4096 if s_length - d_length > 4096 else s_length - d_length
                        if buf == 0:
                            f_handler.flush()
                            break
                        chunk = handle.read(buf)
                        # Completion is judged by Content-Length vs. bytes
                        # written, not by an empty read alone.
                        if not chunk and s_length != d_length:
                            raise Exception, 'Network failure appeared in the process of download %s.' % self.url
                        f_handler.write(chunk)
                        f_handler.flush()
                        d_length += len(chunk)
            except KeyboardInterrupt:
                # Shutdown requested: keep the file if it verifies, else drop it.
                while not f_handler.closed:
                    time.sleep(1)
                if self.check_file(tmp_dir + fname, etag):
                    move(tmp_dir + fname, self.dir + fname)
                    succeed_list.add(self.url)
                    logger_root.info('%s Successful download %s.' % (self.name, self.url))
                else:
                    os.remove(tmp_dir + fname)
                logger_root.warning('%s stop download %s' % (self.name, self.url))
                break
            except URLError, e:
                logger_root.error('%s %s %s' % (self.name, self.url, str(e)))
                error_list.add((self.dir, self.url))
                queue_lock.acquire()
                download_list.discard(self.url)
                queue_lock.release()
                continue
            except socket.timeout, e:
                os.remove(tmp_dir + fname)
                logger_root.error('%s %s %s' % (self.name, self.url, str(e)))
                error_list.add((self.dir, self.url))
            except IOError, e:
                os.remove(tmp_dir + fname)
                logger_root.error('%s %s %s' % (self.name, self.url, str(e)))
                print traceback.format_exc()
                break
            except Exception, e:
                os.remove(tmp_dir + fname)
                logger_root.error('%s %s %s' % (self.name, self.url, str(e)))
                error_list.add((self.dir, self.url))
                print traceback.format_exc()
if __name__ == '__main__': cur_pid = os.getpid() s, o = commands.getstatusoutput("ps aux | grep 'kaikeba videolocaldeploy' | grep -Ev '(grep|%s)'" % cur_pid) if s: #URL合并类 urlmerge = URLMerge() i=1 #调用api获得下载视频的 for api_get_url in api_get_url_list: rest = http_get(api_get_url) url_dic=eval(rest) if url_dic['code'] == 200: logger_root.info('api调用成功!') if url_dic.has_key("data") and len(url_dic["data"]): logger_root.info('第%d个课程id下有视频!' % i) urldicts = urlmerge.get_url(url_dic["data"], mp4_prefix=prefixs['mp4_prefix'], gif_prefix=prefixs['gif_prefix'], srt_prefix=prefixs['srt_prefix']) #生成下载信息dict q = init_queue(urldicts) #初始化下载队列 while True: output = commands.getoutput('ps aux|grep wget|grep -v grep|wc -l') if q.empty(): logger_root.info('第%d个课程下的所有视频都下载完成!' % i) if i == len(api_get_url_list): send_mail("本地部署course%s" % course_id_list,"所有视频都下载完成!") break elif int(output) <= 10: aa=q.get()
if __name__ == '__main__': cur_pid = os.getpid() s, o = commands.getstatusoutput( "ps aux | grep 'kaikeba videolocaldeploy' | grep -Ev '(grep|%s)'" % cur_pid) if s: #URL合并类 urlmerge = URLMerge() #调用api获得下载视频的 rest = http_get() url_dic = eval(rest) if url_dic['code'] == 200: logger_root.info('api调用成功!') if url_dic.has_key("data") and len(url_dic["data"]): logger_root.info('此学校id下有视频!') urldicts = urlmerge.get_url( url_dic["data"], mp4_prefix=prefixs['mp4_prefix'], gif_prefix=prefixs['gif_prefix'], srt_prefix=prefixs['srt_prefix']) #生成下载信息dict q = init_queue(urldicts) #初始化下载队列 while True: output = commands.getoutput( 'ps aux|grep wget|grep -v grep|wc -l') if q.empty(): logger_root.info('所有视频都下载完成') send_mail("本地部署school%s" % school_id, "所有视频都下载完成!")
except Exception, e: os.remove(tmp_dir + fname) logger_root.error('%s %s %s' % (self.name, self.url, str(e))) error_list.add((self.dir, self.url)) print traceback.format_exc() else: while not f_handler.closed: time.sleep(1) d_length = os.path.getsize(tmp_dir + fname) #可能并发下载流量太大,磁盘I/O跟不上,增加下载后文件的实际大小,提高对比准确性 if s_length != d_length: time.sleep(60) d_length = os.path.getsize(tmp_dir + fname) if self.check_file(tmp_dir + fname, etag): move(tmp_dir + fname, self.dir + fname) succeed_list.add(self.url) logger_root.info('%s Successful download %s.' % (self.name, self.url)) else: os.remove(tmp_dir + fname) # move(tmp_dir + fname, '/home/html/lcms/video/' + fname) error_list.add((self.dir, self.url)) logger_root.error('%s Incomplete download %s, source file length is %s, downloaded file length is %s.' % (self.name, self.url, s_length, d_length)) finally: try: handle.close() queue_lock.acquire() download_list.discard(self.url) queue_lock.release() except Exception, e: # logger_root.error('try_finally %s %s %s.' % (self.name, self.url, str(e))) pass else: