def deploy_dispatch(request, req, token):
    """Run a single deploy request end-to-end and broadcast the outcome.

    Creates a short-lived API token in redis, assembles the environment
    handed to the extension-specific deploy routine, and records the final
    request status ('3' on success, '-3' on failure) before notifying
    subscribers.
    """
    rds = get_redis_connection()
    try:
        access_token = uuid.uuid4().hex
        # token -> "<app_id>,<env_id>", valid for one hour
        rds.setex(access_token, 60 * 60, f'{req.deploy.app_id},{req.deploy.env_id}')
        helper = Helper(rds, token)
        helper.send_step('local', 1, f'完成\r\n{human_time()} 发布准备... ')
        deploy = req.deploy
        env = AttrDict(
            SPUG_APP_NAME=deploy.app.name,
            SPUG_APP_ID=str(deploy.app_id),
            SPUG_REQUEST_NAME=req.name,
            SPUG_REQUEST_ID=str(req.id),
            SPUG_ENV_ID=str(deploy.env_id),
            SPUG_ENV_KEY=deploy.env.key,
            SPUG_VERSION=req.version,
            SPUG_DEPLOY_TYPE=req.type,
            SPUG_API_TOKEN=access_token,
        )
        if deploy.extend == '1':
            # regular release: merge the user-defined custom variables first
            env.update(json.loads(deploy.extend_obj.custom_envs))
            _ext1_deploy(req, helper, env)
        else:
            _ext2_deploy(req, helper, env)
        req.status = '3'
    except Exception:
        req.status = '-3'
        raise
    finally:
        # keep the output key around briefly so late subscribers can replay it
        rds.expire(token, 5 * 60)
        rds.close()
        req.save()
        Helper.send_deploy_notify(req)
def _ext2_deploy(req, helper, env):
    """Custom (ext2) publish: execute the configured server-side actions on
    the local machine, then run the host actions on every target host
    concurrently. Re-raises the last host failure observed, if any."""
    extend = req.deploy.extend_obj
    extras = json.loads(req.extra)
    host_actions = json.loads(extend.host_actions)
    server_actions = json.loads(extend.server_actions)
    if extras and extras[0]:
        env.update({'SPUG_RELEASE': extras[0]})
    # server actions occupy step numbers 2, 3, ...
    for step, action in enumerate(server_actions, start=2):
        helper.send_step('local', step, f'\r\n{human_time()} {action["title"]}...\r\n')
        helper.local(f'cd /tmp && {action["data"]}', env)
    helper.send_step('local', 100, '完成\r\n' if not server_actions else '\r\n')
    if not host_actions:
        # nothing to run on the hosts — report overall success from here
        helper.send_step('local', 100, f'\r\n{human_time()} ** 发布成功 **')
        return
    pending, latest_exception = [], None
    with futures.ThreadPoolExecutor(max_workers=min(10, os.cpu_count() + 5)) as executor:
        for h_id in json.loads(req.host_ids):
            # hand every host its own copy of the environment
            task_env = AttrDict(env.items())
            future = executor.submit(_deploy_ext2_host, helper, h_id, host_actions, task_env)
            future.h_id = h_id
            pending.append(future)
        for future in futures.as_completed(pending):
            err = future.exception()
            if err:
                # keep collecting so every host reports; raise afterwards
                latest_exception = err
                if not isinstance(err, SpugError):
                    helper.send_error(future.h_id, f'Exception: {err}', False)
    if latest_exception:
        raise latest_exception
def _ext1_deploy(req, helper, env):
    """Regular (ext1) publish: check out the requested git tree, run the
    pre/post server hooks, build the release tarball and deploy it to every
    target host in parallel.

    Raises the first host exception observed, reported against the host
    that actually failed.
    """
    extend = req.deploy.extend_obj
    extras = json.loads(req.extra)
    if extras[0] == 'branch':
        # extras = ['branch', <branch name>, <commit id>]
        tree_ish = extras[2]
        env.update(SPUG_GIT_BRANCH=extras[1], SPUG_GIT_COMMIT_ID=extras[2])
    else:
        # extras = ['tag', <tag name>]
        tree_ish = extras[1]
        env.update(SPUG_GIT_TAG=extras[1])
    if req.type == '2':
        # rollback: reuse the previously built package, skip checkout/build
        helper.send_step('local', 6, f'完成\r\n{human_time()} 回滚发布... 跳过')
    else:
        helper.local(f'cd {REPOS_DIR} && rm -rf {req.deploy_id}_*')
        helper.send_step('local', 1, '完成\r\n')
        if extend.hook_pre_server:
            helper.send_step('local', 2, f'{human_time()} 检出前任务...\r\n')
            helper.local(f'cd /tmp && {extend.hook_pre_server}', env)
        helper.send_step('local', 3, f'{human_time()} 执行检出... ')
        git_dir = os.path.join(REPOS_DIR, str(req.deploy.id))
        command = f'cd {git_dir} && git archive --prefix={env.SPUG_VERSION}/ {tree_ish} | (cd .. && tar xf -)'
        helper.local(command)
        helper.send_step('local', 3, '完成\r\n')
        if extend.hook_post_server:
            helper.send_step('local', 4, f'{human_time()} 检出后任务...\r\n')
            helper.local(f'cd {os.path.join(REPOS_DIR, env.SPUG_VERSION)} && {extend.hook_post_server}', env)
        helper.send_step('local', 5, f'\r\n{human_time()} 执行打包... ')
        filter_rule, exclude, contain = json.loads(extend.filter_rule), '', env.SPUG_VERSION
        files = helper.parse_filter_rule(filter_rule['data'])
        if files:
            if filter_rule['type'] == 'exclude':
                exclude = ' '.join(f'--exclude={x}' for x in files)
            else:
                # include mode: enumerate only the listed paths in the archive
                contain = ' '.join(f'{env.SPUG_VERSION}/{x}' for x in files)
        helper.local(f'cd {REPOS_DIR} && tar zcf {env.SPUG_VERSION}.tar.gz {exclude} {contain}')
        helper.send_step('local', 6, '完成')
    # os.cpu_count() may return None; treat that as a single core
    with futures.ThreadPoolExecutor(max_workers=min(10, (os.cpu_count() or 1) + 5)) as executor:
        threads = []
        for h_id in json.loads(req.host_ids):
            # give each host its own copy so per-host mutation cannot leak
            host_env = AttrDict(env.items())
            t = executor.submit(_deploy_ext1_host, helper, h_id, extend, host_env)
            # BUGFIX: tag the future with its host so a failure is attributed
            # to the host that raised, not to the last host submitted
            t.h_id = h_id
            threads.append(t)
        for t in futures.as_completed(threads):
            exception = t.exception()
            if exception:
                helper.send_error(t.h_id, f'Exception: {exception}')
                raise exception
def _ext1_deploy(req, helper, env):
    """Repository-based (ext1) publish: build the repository package on
    demand, expose the git branch/tag variables, then deploy to the target
    hosts either in parallel or one-by-one per the deploy configuration.

    Parallel mode reports every failed host and re-raises the last failure;
    serial mode aborts the remaining hosts on the first failure.
    """
    if not req.repository_id:
        # no prebuilt package for this request yet — build one automatically
        rep = Repository(
            app_id=req.deploy.app_id,
            env_id=req.deploy.env_id,
            deploy_id=req.deploy_id,
            version=req.version,
            spug_version=req.spug_version,
            extra=req.extra,
            remarks='SPUG AUTO MAKE',
            created_by_id=req.created_by_id)
        build_repository(rep, helper)
        req.repository = rep
    extras = json.loads(req.extra)
    if extras[0] == 'repository':
        # strip the leading marker so extras matches the branch/tag layout
        extras = extras[1:]
    if extras[0] == 'branch':
        env.update(SPUG_GIT_BRANCH=extras[1], SPUG_GIT_COMMIT_ID=extras[2])
    else:
        env.update(SPUG_GIT_TAG=extras[1])
    if req.deploy.is_parallel:
        threads, latest_exception = [], None
        # BUGFIX: os.cpu_count() may return None (documented), which would
        # raise a TypeError here — fall back to a single core
        max_workers = max(10, (os.cpu_count() or 1) * 5)
        with futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            for h_id in json.loads(req.host_ids):
                # per-host copy of the environment
                new_env = AttrDict(env.items())
                t = executor.submit(_deploy_ext1_host, req, helper, h_id, new_env)
                t.h_id = h_id
                threads.append(t)
            for t in futures.as_completed(threads):
                exception = t.exception()
                if exception:
                    latest_exception = exception
                    if not isinstance(exception, SpugError):
                        helper.send_error(t.h_id, f'Exception: {exception}', False)
        if latest_exception:
            raise latest_exception
    else:
        # serial mode: deploy hosts in ascending id order, abort the rest on
        # the first failure
        host_ids = sorted(json.loads(req.host_ids), reverse=True)
        while host_ids:
            h_id = host_ids.pop()
            new_env = AttrDict(env.items())
            try:
                _deploy_ext1_host(req, helper, h_id, new_env)
            except Exception as e:
                helper.send_error(h_id, f'Exception: {e}', False)
                for h_id in host_ids:
                    helper.send_error(h_id, '终止发布', False)
                raise e
def dispatch(rep: Repository, helper=None):
    """Build the repository package for *rep*.

    When *helper* is None this is a standalone build: a redis output channel
    is created here, and the function returns the Repository object instead
    of letting a failure propagate (status '2' marks the error). When a
    helper is supplied, the caller owns the channel and exceptions propagate.
    """
    rep.status = '1'  # building
    alone_build = helper is None
    if not helper:
        rds = get_redis_connection()
        rds_key = f'{settings.BUILD_KEY}:{rep.spug_version}'
        helper = Helper(rds, rds_key)
        rep.save()
    try:
        # one-hour API token mapping to "<app_id>,<env_id>"
        api_token = uuid.uuid4().hex
        helper.rds.setex(api_token, 60 * 60, f'{rep.app_id},{rep.env_id}')
        helper.send_info(
            'local', f'\033[32m完成√\033[0m\r\n{human_time()} 构建准备... ')
        env = AttrDict(
            SPUG_APP_NAME=rep.app.name,
            SPUG_APP_KEY=rep.app.key,
            SPUG_APP_ID=str(rep.app_id),
            SPUG_DEPLOY_ID=str(rep.deploy_id),
            SPUG_BUILD_ID=str(rep.id),
            SPUG_ENV_ID=str(rep.env_id),
            SPUG_ENV_KEY=rep.env.key,
            SPUG_VERSION=rep.version,
            SPUG_API_TOKEN=api_token,
            SPUG_REPOS_DIR=REPOS_DIR,
        )
        # append configs: app/env configuration items exposed as _SPUG_<KEY>
        configs = compose_configs(rep.app, rep.env_id)
        configs_env = {f'_SPUG_{k.upper()}': v for k, v in configs.items()}
        env.update(configs_env)
        _build(rep, helper, env)
        rep.status = '5'  # build succeeded
    except Exception as e:
        rep.status = '2'  # build failed
        raise e
    finally:
        # drop the build working directory regardless of outcome
        helper.local(f'cd {REPOS_DIR} && rm -rf {rep.spug_version}')
        close_old_connections()
        if alone_build:
            helper.clear()
            rep.save()
            # NOTE(review): a `return` inside `finally` swallows any
            # in-flight exception, so standalone callers must inspect
            # rep.status rather than rely on an exception — confirm intended.
            return rep
        elif rep.status == '5':
            rep.save()
def dispatch(req):
    """Entry point for executing a deploy request.

    Prepares the redis-backed output channel and the environment shared by
    both deploy modes, delegates to the matching _ext*_deploy routine, and
    always persists the final status ('3' ok / '-3' failed) before sending
    the deploy notification.
    """
    rds = get_redis_connection()
    helper = Helper(rds, f'{settings.REQUEST_KEY}:{req.id}')
    try:
        deploy = req.deploy
        access_token = uuid.uuid4().hex
        # one-hour token mapping to "<app_id>,<env_id>" for the API hooks
        rds.setex(access_token, 60 * 60, f'{deploy.app_id},{deploy.env_id}')
        env = AttrDict(
            SPUG_APP_NAME=deploy.app.name,
            SPUG_APP_KEY=deploy.app.key,
            SPUG_APP_ID=str(deploy.app_id),
            SPUG_REQUEST_NAME=req.name,
            SPUG_DEPLOY_ID=str(deploy.id),
            SPUG_REQUEST_ID=str(req.id),
            SPUG_ENV_ID=str(deploy.env_id),
            SPUG_ENV_KEY=deploy.env.key,
            SPUG_VERSION=req.version,
            SPUG_DEPLOY_TYPE=req.type,
            SPUG_API_TOKEN=access_token,
            SPUG_REPOS_DIR=REPOS_DIR,
        )
        # fold the app/env configuration items in as _SPUG_<KEY> variables
        configs = compose_configs(deploy.app, deploy.env_id)
        env.update({f'_SPUG_{k.upper()}': v for k, v in configs.items()})
        if deploy.extend == '1':
            _ext1_deploy(req, helper, env)
        else:
            _ext2_deploy(req, helper, env)
        req.status = '3'
    except Exception:
        req.status = '-3'
        raise
    finally:
        close_old_connections()
        req.save()
        helper.clear()
        Helper.send_deploy_notify(req)
def _sync_host_extend(host, private_key=None, public_key=None, password=None, ssh=None):
    """Collect extended hardware/OS facts from a host over SSH and persist
    them, updating the existing HostExtend record when one is present.

    Opens an SSH connection itself unless one is passed in.
    """
    if not ssh:
        conn_args = host.to_dict(selects=('hostname', 'port', 'username'))
        ssh = _get_ssh(conn_args, host.pkey, private_key, public_key, password)
    form = AttrDict(fetch_host_extend(ssh))
    # list-valued fields are persisted as JSON strings
    for field in ('disk', 'public_ip_address', 'private_ip_address'):
        form[field] = json.dumps(form[field])
    form.updated_at = human_datetime()
    form.os_type = check_os_type(form.os_name)
    if hasattr(host, 'hostextend'):
        record = host.hostextend
        record.update_by_dict(form)
    else:
        record = HostExtend.objects.create(host=host, **form)
    return record
def add_monitor(request, app, host_ids, deploy_id):
    """Register a type-'3' monitor item for *app* on each deployed host.

    One monitor entry per host id is created via the monitor app's
    add_monitor view, tied back to *deploy_id*.
    """
    from apps.monitor.views import add_monitor as am
    from libs.utils import AttrDict
    for h_id in host_ids:
        params = {
            'id': None,
            'name': app.name,
            'addr': h_id,
            'type': '3',
            'extra': app.key,
            'notify_grp': [],
            'notify_mode': [],
            'deploy_id': deploy_id
        }
        # AttrDict accepts dict-style construction (see AttrDict(env.items())
        # usage elsewhere) — no need to copy key-by-key
        p = AttrDict(params)
        logger.info(f'添加监控: {params}')
        am(request.user, p)
def _ext2_deploy(req, helper, env):
    """Custom (ext2) publish: run the server-side actions locally, pre-pack
    the first local-path transfer action into a tarball, then execute the
    host actions on every target host (in parallel or serially per the
    deploy configuration).
    """
    helper.send_info('local', f'\033[32m完成√\033[0m\r\n')
    extend, step = req.deploy.extend_obj, 1
    host_actions = json.loads(extend.host_actions)
    server_actions = json.loads(extend.server_actions)
    # expose the operator-supplied version, plus each whitespace-separated
    # part as SPUG_RELEASE_1, SPUG_RELEASE_2, ...
    env.update({'SPUG_RELEASE': req.version})
    if req.version:
        for index, value in enumerate(req.version.split()):
            env.update({f'SPUG_RELEASE_{index + 1}': value})
    for action in server_actions:
        helper.send_step('local', step, f'{human_time()} {action["title"]}...\r\n')
        helper.local(f'cd /tmp && {action["data"]}', env)
        step += 1
    # only the first transfer action is packaged locally; the loop exits at
    # the first transfer it sees — NOTE(review): confirm a single transfer
    # action is enforced upstream
    for action in host_actions:
        if action.get('type') == 'transfer':
            # render template variables in the paths against the deploy env
            action['src'] = render_str(
                action.get('src', '').strip().rstrip('/'), env)
            action['dst'] = render_str(action['dst'].strip().rstrip('/'), env)
            if action.get('src_mode') == '1':
                # src_mode '1' presumably means the data originates on the
                # remote host, so no local packaging applies — TODO confirm
                break
            helper.send_step(
                'local', step, f'{human_time()} 检测到来源为本地路径的数据传输动作,执行打包... \r\n')
            action['src'] = action['src'].rstrip('/ ')
            action['dst'] = action['dst'].rstrip('/ ')
            if not action['src'] or not action['dst']:
                # NOTE(review): execution continues past send_error here only
                # if it raises/aborts internally — verify Helper.send_error
                helper.send_error(
                    'local', f'Invalid path for transfer, src: {action["src"]} dst: {action["dst"]}')
            if not os.path.exists(action['src']):
                helper.send_error(
                    'local', f'No such file or directory: {action["src"]}')
            # pack the final path component (sd_dst) so the archive keeps the
            # top-level directory/file name
            is_dir, exclude = os.path.isdir(action['src']), ''
            sp_dir, sd_dst = os.path.split(action['src'])
            contain = sd_dst
            if action['mode'] != '0' and is_dir:
                # mode '1' = include only the listed entries; any other
                # non-'0' mode = exclude the listed entries
                files = helper.parse_filter_rule(action['rule'], ',', env)
                if files:
                    if action['mode'] == '1':
                        contain = ' '.join(f'{sd_dst}/{x}' for x in files)
                    else:
                        excludes = []
                        for x in files:
                            if x.startswith('/'):
                                # anchored pattern: make it relative to the
                                # packed directory
                                excludes.append(f'--exclude={sd_dst}{x}')
                            else:
                                excludes.append(f'--exclude={x}')
                        exclude = ' '.join(excludes)
            tar_gz_file = f'{req.spug_version}.tar.gz'
            helper.local(
                f'cd {sp_dir} && tar -zcf {tar_gz_file} {exclude} {contain}')
            helper.send_info('local', f'{human_time()} \033[32m完成√\033[0m\r\n')
            # remove the temporary archive once the publish finishes
            helper.add_callback(
                partial(os.remove, os.path.join(sp_dir, tar_gz_file)))
            break
    helper.send_step('local', 100, '')
    if host_actions:
        if req.deploy.is_parallel:
            threads, latest_exception = [], None
            # NOTE(review): os.cpu_count() can return None, which would make
            # this expression raise — verify the deployment environment
            max_workers = max(10, os.cpu_count() * 5)
            with futures.ThreadPoolExecutor(
                    max_workers=max_workers) as executor:
                for h_id in json.loads(req.host_ids):
                    # per-host copy of the environment
                    new_env = AttrDict(env.items())
                    t = executor.submit(_deploy_ext2_host, helper, h_id,
                                        host_actions, new_env, req.spug_version)
                    t.h_id = h_id
                    threads.append(t)
                for t in futures.as_completed(threads):
                    exception = t.exception()
                    if exception:
                        # keep collecting so every host reports; the last
                        # failure is re-raised below
                        latest_exception = exception
                        if not isinstance(exception, SpugError):
                            helper.send_error(t.h_id, f'Exception: {exception}', False)
            if latest_exception:
                raise latest_exception
        else:
            # serial mode: ascending host-id order, abort the remaining
            # hosts on the first failure
            host_ids = sorted(json.loads(req.host_ids), reverse=True)
            while host_ids:
                h_id = host_ids.pop()
                new_env = AttrDict(env.items())
                try:
                    _deploy_ext2_host(helper, h_id, host_actions, new_env,
                                      req.spug_version)
                except Exception as e:
                    helper.send_error(h_id, f'Exception: {e}', False)
                    for h_id in host_ids:
                        helper.send_error(h_id, '终止发布', False)
                    raise e
    else:
        # no host actions at all — report overall success from here
        helper.send_step('local', 100, f'\r\n{human_time()} ** 发布成功 **')
def _ext2_deploy(req, helper, env):
    """Custom (ext2) publish: run the server-side actions locally, pack the
    first transfer action's source into a temporary tarball, then run the
    host actions on all target hosts in parallel.
    """
    extend = req.deploy.extend_obj
    extras = json.loads(req.extra)
    host_actions = json.loads(extend.host_actions)
    server_actions = json.loads(extend.server_actions)
    if extras and extras[0]:
        env.update({'SPUG_RELEASE': extras[0]})
    # server actions occupy step numbers 2, 3, ...
    step = 2
    for action in server_actions:
        helper.send_step('local', step, f'\r\n{human_time()} {action["title"]}...\r\n')
        helper.local(f'cd /tmp && {action["data"]}', env)
        step += 1
    # step == 2 here means there were no server actions
    helper.send_step('local', 100, '完成\r\n' if step == 2 else '\r\n')
    tmp_transfer_file = None
    # only the first transfer action is packaged; the loop exits at the
    # first one it finds — NOTE(review): confirm a single transfer action is
    # enforced upstream
    for action in host_actions:
        if action.get('type') == 'transfer':
            helper.send_info('local', f'{human_time()} 检测到数据传输动作,执行打包... ')
            action['src'] = action['src'].rstrip('/ ')
            action['dst'] = action['dst'].rstrip('/ ')
            if not action['src'] or not action['dst']:
                # NOTE(review): execution continues past send_error here only
                # if it raises/aborts internally — verify Helper.send_error
                helper.send_error(
                    'local', f'invalid path for transfer, src: {action["src"]} dst: {action["dst"]}')
            # pack the final path component (sd_dst) so the archive keeps
            # the top-level directory/file name
            is_dir, exclude = os.path.isdir(action['src']), ''
            sp_dir, sd_dst = os.path.split(action['src'])
            contain = sd_dst
            if action['mode'] != '0' and is_dir:
                # mode '1' = include only the listed entries; any other
                # non-'0' mode = exclude the listed entries
                files = helper.parse_filter_rule(action['rule'], ',')
                if files:
                    if action['mode'] == '1':
                        contain = ' '.join(f'{sd_dst}/{x}' for x in files)
                    else:
                        excludes = []
                        for x in files:
                            if x.startswith('/'):
                                # anchored pattern: make it relative to the
                                # packed directory
                                excludes.append(f'--exclude={sd_dst}{x}')
                            else:
                                excludes.append(f'--exclude={x}')
                        exclude = ' '.join(excludes)
            tar_gz_file = f'{env.SPUG_VERSION}.tar.gz'
            helper.local(
                f'cd {sp_dir} && tar zcf {tar_gz_file} {exclude} {contain}')
            helper.send_info('local', '完成\r\n')
            # remembered so it can be deleted after the host phase
            tmp_transfer_file = os.path.join(sp_dir, tar_gz_file)
            break
    if host_actions:
        threads, latest_exception = [], None
        with futures.ThreadPoolExecutor(
                max_workers=min(10, os.cpu_count() + 5)) as executor:
            for h_id in json.loads(req.host_ids):
                # per-host copy of the environment
                env = AttrDict(env.items())
                t = executor.submit(_deploy_ext2_host, helper, h_id,
                                    host_actions, env)
                t.h_id = h_id
                threads.append(t)
            for t in futures.as_completed(threads):
                exception = t.exception()
                if exception:
                    # keep collecting so every host reports; the last failure
                    # is re-raised below
                    latest_exception = exception
                    if not isinstance(exception, SpugError):
                        helper.send_error(t.h_id, f'Exception: {exception}', False)
        # remove the temporary archive whether or not a host failed
        if tmp_transfer_file:
            os.remove(tmp_transfer_file)
        if latest_exception:
            raise latest_exception
    else:
        # no host actions at all — report overall success from here
        helper.send_step('local', 100, f'\r\n{human_time()} ** 发布成功 **')