async def websocket_endpoint(
    websocket: WebSocket,
    uuid: UUID,
    # user: models.User = Depends(deps.get_current_user),
) -> None:
    """Proxy a client websocket to the worker's plotting log websocket.

    Looks up the plot queue by ``uuid``, resolves the worker host/port from
    the queue's server record, then forwards every message from the worker
    to the connected client as text.

    Sends a JSON ``{"error": ...}`` payload and returns early when the
    queue does not exist or has no bound execution.
    """
    await websocket.accept()
    # DB session is only needed to resolve the target URI; it is closed
    # before the long-lived proxy loop starts.
    with session_manager(DatabaseSession) as db:
        plot_queue = crud.plot_queue.get(db, id=uuid)
        if plot_queue is None:
            return await websocket.send_json(
                {"error": "No plot queue with such id"})
        if plot_queue.execution_id is None:
            return await websocket.send_json(
                {"error": "No execution is bound to a queue"})
        # hostname may carry an explicit port ("host:port"); keep host only.
        host = plot_queue.server.hostname.split(":")[0]
        port = plot_queue.server.worker_port
        uri = (f"ws://{host}:{port}/plotting/ws/"
               f"?execution_id={plot_queue.execution_id}")
    async with websockets.connect(uri) as proxy_websocket:
        # Forward worker frames until either side disconnects (a
        # ConnectionClosed from websockets ends the loop by propagating).
        while True:
            data = await proxy_websocket.recv()
            if isinstance(data, bytes):
                data = data.decode("utf-8")
            await websocket.send_text(data)
def run(self, isp_id: int, domain: str):
    """Check domain availability through an ISP and tag purchasability.

    Args:
        isp_id: primary key of the ISP record to query through.
        domain: raw domain string supplied by the caller.

    Returns:
        A list of dicts — each entry is the ISP's raw domain data plus a
        ``purchasable`` boolean — or the value of ``self.set_result`` when
        no ISP record matches ``isp_id``.
    """
    with session_manager() as db_session:
        isp = crud_isp.get(db_session=db_session, id=isp_id)
        # BUG FIX: the original read ``isp.isp_instance`` *before* this
        # guard, raising AttributeError whenever the ISP row was missing.
        if not isp:
            result = {
                'status': 'pass',
                'msg': f"empty isp via isp_id: {isp_id}"
            }
            return self.set_result(result)
        isp_instance = isp.isp_instance
        formatted_domain = self.format_domain(domain)
        domain_detect_result = isp_instance.check_domain_raw(formatted_domain)
        reply_data = domain_detect_result.get('reply', {})
        available_data = reply_data.get("available", {})
        unavailable_data = reply_data.get("unavailable", {})

        domain_list = []
        # Use fresh loop names: the original reused the source dicts as
        # loop variables, shadowing them mid-iteration.
        for item in self.extract_domain_data(available_data):
            domain_list.append(dict(purchasable=True, **item))
        for item in self.extract_domain_data(unavailable_data):
            domain_list.append(dict(purchasable=False, **item))
        return domain_list
def run(self, domain_id):
    """Load DNS record and DNS server details for one domain, if it exists."""
    with session_manager() as db_session:
        if (domain := crud_domain.get(db_session=db_session, id=domain_id)):
            self.load_domain_dns_record(db_session, domain)
            self.load_domain_dns_server(db_session, domain)
        return self.set_result()
def run(self, vps_id: int, *args, **kwargs):
    """Tear down the cloud server attached to a VPS record.

    Returns the ISP's destroy response, or an empty dict when the VPS
    record is missing or has no bound server id.
    """
    with session_manager() as db_session:
        vps_obj = crud_vps.get(db_session=db_session, id=vps_id)
        if not (vps_obj and vps_obj.server_id):
            return {}
        return vps_obj.isp.isp_instance.destroy_server(vps_obj.server_id)
def run(self, team_server_id: int):
    """Provision a team server host over SSH.

    Installs prerequisites, renders and uploads the C2 profile plus the
    teamserver/install scripts, then executes the install script remotely.
    Returns early (None) when no SSH config is stored.
    """
    with session_manager() as db_session:
        team_server_obj = crud_team_server.get(db_session=db_session,
                                               id=team_server_id)
        ssh_obj = crud_ssh_config.get_config(db_session)
        if not ssh_obj:
            # No stored SSH key — nothing we can connect with.
            return
        # Temp dir holds the private key and rendered files for this run.
        tmp_dir = TemporaryDirectory()
        ssh_conn = self.gen_ssh_conn(
            addr=f"root@{team_server_obj.ip}",
            private_key=ssh_obj.private_key,
            tmp_dir=tmp_dir.name
        )
        # 1. install requirements needed by the install script
        install_lib_script = "apt-get install -y wget unzip"
        self.exec_remote_cmd(
            conn=ssh_conn,
            command=install_lib_script
        )
        # 2. render c2 profile, teamserver launcher and install script
        template_render = TemplateRender()
        c2_content = team_server_obj.c2_profile.profile_content
        team_server_content = template_render.render(
            'scripts/team_server.sh',
            **{'port': team_server_obj.port}
        )
        cs_content = template_render.render(
            'scripts/cs_install.sh',
            cs_url=team_server_obj.cs_download_url,
            zip_pwd=team_server_obj.zip_password,
            cs_pwd=team_server_obj.password,
            kill_date=team_server_obj.kill_date
        )
        c2_tmp_file = self.gen_tmp_file(
            content=c2_content,
            dir_path=tmp_dir.name
        )
        team_server_file = self.gen_tmp_file(
            content=team_server_content,
            dir_path=tmp_dir.name
        )
        cs_server_file = self.gen_tmp_file(
            content=cs_content,
            dir_path=tmp_dir.name
        )
        # Upload to the remote user's home dir (relative paths).
        self.upload_remote_file(
            conn=ssh_conn,
            source_file=c2_tmp_file.name,
            remote_file='ok.profile'
        )
        self.upload_remote_file(
            conn=ssh_conn,
            source_file=team_server_file.name,
            remote_file='teamserver'
        )
        self.upload_remote_file(
            conn=ssh_conn,
            source_file=cs_server_file.name,
            remote_file='cs.sh'
        )
        # 3. execute the install script remotely
        self.exec_remote_cmd(conn=ssh_conn,
                             command='chmod +x cs.sh && bash cs.sh')
    return self.set_result()
def reload_vps_isp_config(self, vps_isp_id: int) -> bool:
    """Force-refresh the cached VPS spec data for one ISP.

    Args:
        vps_isp_id: primary key of the ISP whose spec cache to reload.

    Returns:
        True when fresh spec data was retrieved, False otherwise.
    """
    with session_manager() as db_session:
        rp = RedisPool()
        try:
            vps_raw_spec_data = rp.get_vps_spec_data(
                db_session=db_session, isp_id=vps_isp_id, reload=True)
        except Exception:
            # Narrowed from a bare ``except:`` which also swallowed
            # SystemExit/KeyboardInterrupt. Best-effort semantics kept:
            # any retrieval failure means "no data".
            vps_raw_spec_data = None
        return bool(vps_raw_spec_data)
def run(self, vps_profile: VpsCreateSchema, vps_id: int, *args, **kwargs) -> dict:
    """Create the server at the ISP and mark the VPS record as running."""
    with session_manager() as db_session:
        isp_record = crud_isp.get(db_session=db_session,
                                  id=vps_profile['isp_id'])
        ssh_config = crud_ssh_config.get_config(db_session)
        server_info = isp_record.isp_instance.create_server(
            vps_profile, ssh_config.public_key)
        crud_vps.update(
            db_session=db_session,
            obj_id=vps_id,
            obj_in=dict(status=VpsStatus.running, **server_info),
        )
        return self.set_result()
def on_failure(self, exc, task_id, args, kwargs, einfo) -> None:
    """Celery failure hook: mark the targeted VPS row as errored.

    Expects the failed task's last positional argument to be the VPS id;
    when it is not available, only logs the exception.
    """
    # BUG FIX: guard against empty ``args`` — the original indexed
    # ``args[-1]`` unconditionally and raised IndexError here.
    vps_id = args[-1] if args and isinstance(args[-1], int) else None
    if not vps_id:
        return self.log_exception(exc)
    with session_manager() as db_session:
        error_data = dict(
            status=VpsStatus.error,
            status_msg=str(einfo)  # full traceback text from Celery
        )
        crud_vps.update(db_session=db_session, obj_id=vps_id, obj_in=error_data)
def run(self, *args, **kwargs):
    """Fan out one LoadDomainExtraDataTask per domain and wait for all."""
    with session_manager() as db_session:
        signatures = [
            LoadDomainExtraDataTask().s(domain.id)
            for domain in crud_domain.get_domain_list(db_session)
        ]
        group_result = group(signatures).delay()
        # Joining from inside a task requires Celery's explicit opt-in.
        with allow_join_result():
            group_result.join()
        return self.set_result()
def run(self, isp_id: int, domain: str, **kwargs: dict):
    """Register ``domain`` at the ISP; persist a domain record on success.

    Returns True when registration succeeded, False otherwise (including
    when the ISP call raised — the exception is logged, not propagated).
    """
    with session_manager() as db_session:
        isp = crud_isp.get(db_session=db_session, id=isp_id)
        try:
            registered = isp.isp_instance.register_domain(domain)
        except Exception as exc:
            self.log_exception(exc)
            registered = False
        if registered:
            crud_domain.create(
                db_session=db_session,
                obj_in=DomainCreate(isp_id=isp_id, domain=domain),
                serializer=None,
            )
        return registered
def run(self, task_data: dict, *args, **kwargs):
    """Record a health probe for each A/CNAME DNS record of a domain.

    ``task_data`` must contain ``domain_id``, ``domain_name`` and ``id``
    (the monitor task id).
    """
    with session_manager() as db_session:
        dns_records = crud_domain_dns_record.filter_by(
            db_session=db_session, domain_id=task_data['domain_id'])
        for record in dns_records:
            # Only address-bearing record types are probed.
            if record.type.lower() not in ['a', 'cname']:
                continue
            health_data = self.fetch_domain_health_record(
                domain_name=task_data['domain_name'])
            crud_domain_health.create(
                db_session=db_session,
                obj_in=dict(
                    domain_id=task_data['domain_id'],
                    task_id=task_data['id'],
                    host=record.host,
                    **health_data,
                ),
                serializer=None,
            )
def run(self, *args, **kwargs) -> dict:
    """Reload the VPS spec cache once per distinct ISP provider."""
    handled_providers: list = []
    task_result = {'handled_vps_isp_list': handled_providers}
    with session_manager() as db_session:
        isp_rows = crud_isp.get_vps_isp_list(db_session).all()
        for isp_item in parse_obj_as(List[IspItem], isp_rows):
            provider = isp_item.provider_name
            # Several rows may share one provider; reload it only once.
            if provider in handled_providers:
                continue
            if self.reload_vps_isp_config(isp_item.id):
                handled_providers.append(provider)
    return self.set_result(task_result)
def run(self, redirector_id: int):
    """Deploy a redirector host over SSH.

    Uploads the team server's C2 profile, renders and uploads the
    redirector bootstrap script, then executes it remotely. Returns early
    (None) when no SSH config is stored.
    """
    with session_manager() as db_session:
        redirector_obj = crud_redirector.get(db_session=db_session,
                                             id=redirector_id)
        ssh_obj = crud_ssh_config.get_config(db_session)
        if not ssh_obj:
            # No stored SSH key — nothing we can connect with.
            return
        # Temp dir holds the private key and rendered files for this run.
        tmp_dir = TemporaryDirectory()
        ssh_conn = self.gen_ssh_conn(
            addr=f"root@{redirector_obj.ip}",
            private_key=ssh_obj.private_key,
            tmp_dir=tmp_dir.name
        )
        template_render = TemplateRender()
        c2_content = redirector_obj.team_server.c2_profile.profile_content
        c2_tmp_file = self.gen_tmp_file(
            content=c2_content,
            dir_path=tmp_dir.name
        )
        self.upload_remote_file(
            conn=ssh_conn,
            source_file=c2_tmp_file.name,
            remote_file='c2.profile'
        )
        # Render the bootstrap script against this redirector's settings.
        redirector_content = template_render.render(
            'scripts/c2_redirectors.sh',
            domain=redirector_obj.domain_name,
            ssl=1,
            c2_profile='~/c2.profile',
            cs2_server_ip=redirector_obj.team_server.cs_conn_url,
            redirect=redirector_obj.redirect_domain
        )
        redirector_bash_file = self.gen_tmp_file(
            content=redirector_content,
            dir_path=tmp_dir.name
        )
        self.upload_remote_file(
            conn=ssh_conn,
            source_file=redirector_bash_file.name,
            remote_file='redirector.sh'
        )
        self.exec_remote_cmd(conn=ssh_conn,
                             command='chmod +x redirector.sh && bash redirector.sh')
def run(self, grow_domain_id: int):
    """Stop nginx on the VPS hosting a grown domain (best effort)."""
    with session_manager() as db_session:
        grow_domain = crud_domain_grow.get(db_session=db_session,
                                           id=grow_domain_id)
        if not grow_domain:
            return
        vps_ip = grow_domain.vps.ip
        if not vps_ip:
            return
        tmp_dir = TemporaryDirectory()
        ssh_config = crud_ssh_config.get_config(db_session)
        conn = self.gen_ssh_conn(addr=f"root@{vps_ip}",
                                 tmp_dir=tmp_dir.name,
                                 private_key=ssh_config.private_key)
        # warn=True keeps a non-zero exit (no nginx running) from raising.
        self.exec_remote_cmd(
            conn=conn,
            command=(
                "ps -aux | grep 'nginx:' | awk '{print $2}'| xargs kill"),
            warn=True)
        return self.set_result()
def run(self, *args, **kwargs):
    """Queue MonitorDomainRunnerTask for active tasks not already running."""
    redis_pool = RedisPool()
    with session_manager() as db_session:
        active_tasks = crud_domain_task.get_active_task(
            db_session=db_session).all()
        for task_obj in active_tasks:
            payload = {
                'id': task_obj.id,
                'interval': task_obj.interval,
                'domain_id': task_obj.domain_id,
                'domain_name': task_obj.domain_name
            }
            running_key = redis_pool.gen_task_status_key(
                status=redis_pool.TASK_RUNNING_STATUS,
                sequence=payload['id'])
            if redis_pool.exists(running_key):
                # Already scheduled within its interval — skip.
                continue
            # Mark as scheduled for one interval so overlapping beats skip it.
            redis_pool.set_data_cache(running_key, 1, ex=payload['interval'])
            MonitorDomainRunnerTask().delay(payload)
def run(self, grow_domain_id: int):
    """Provision a static site for a grown domain on its VPS via SSH.

    Installs nginx/unzip, uploads the zipped site template, unpacks it,
    deploys a rendered nginx config, (re)starts nginx, then points the
    domain's DNS A record at the VPS IP.
    """
    with session_manager() as db_session:
        grow_domain_obj = crud_domain_grow.get(db_session=db_session,
                                               id=grow_domain_id)
        if not grow_domain_obj:
            return
        ip_address = grow_domain_obj.vps.ip
        tmp_dir = TemporaryDirectory()
        ssh_obj = crud_ssh_config.get_config(db_session)
        # NOTE(review): unlike sibling tasks, ssh_obj is not None-checked
        # here — a missing SSH config would raise AttributeError below.
        site_work_dir = f"/opt/{PROJECT_NAME}/site"
        site_data_dir = f"/opt/{PROJECT_NAME}/data"
        # 1. install nginx
        ssh_conn = self.gen_ssh_conn(addr=f"root@{ip_address}",
                                     tmp_dir=tmp_dir.name,
                                     private_key=ssh_obj.private_key)
        # Handles both yum- and apt-based distros: whichever package
        # manager exists runs its own branch.
        install_nginx_command = (
            "command -v yum && yum install -y epel-release && yum install -y nginx unzip;"
            "command -v apt-get && apt-get update -y && apt-get install -y nginx unzip;"
            f"mkdir -p {site_work_dir} {site_data_dir}")
        self.exec_remote_cmd(conn=ssh_conn, command=install_nginx_command)
        # 2. upload template file
        site_template_file_name = grow_domain_obj.template.zip_file_name
        site_template_content = BytesIO(
            grow_domain_obj.template.zip_file_content).read()
        site_template_tmp_file = self.gen_tmp_file(
            content=site_template_content, dir_path=tmp_dir.name)
        self.upload_remote_file(
            conn=ssh_conn,
            source_file=site_template_tmp_file.name,
            remote_file=f"{site_data_dir}/{site_template_file_name}")
        # 3. update nginx conf and configure nginx
        self.exec_remote_cmd(
            conn=ssh_conn,
            command=
            (f"rm -rf {site_work_dir} &&"
             f"unzip -o -d {site_work_dir} {site_data_dir}/{site_template_file_name};"
             ))
        nginx_config_content = TemplateRender().render_nginx_conf(
            nginx_site_work_dir=site_work_dir)
        nginx_config_tmp_file = self.gen_tmp_file(
            content=nginx_config_content, dir_path=tmp_dir.name)
        nginx_conf_deploy_path = f"{site_data_dir}/{TemplateRender.NGINX_TEMPLATE_CONF}"
        self.upload_remote_file(conn=ssh_conn,
                                source_file=nginx_config_tmp_file.name,
                                remote_file=nginx_conf_deploy_path)
        # Best-effort stop of any running nginx workers; warn=True keeps a
        # non-zero exit (no nginx running) from raising.
        self.exec_remote_cmd(
            conn=ssh_conn,
            command=(
                "ps -aux | grep 'nginx:' | awk '{print $2}'| xargs kill"),
            warn=True)
        self.exec_remote_cmd(
            conn=ssh_conn,
            command=(f"nginx -c {nginx_conf_deploy_path}"))
        # 4. set dns record
        grow_domain_obj.isp.isp_instance.set_dns_a_record(
            grow_domain_obj.domain_name, ip_address)
        return
def server_ping_task(
    self: celery.Task,
    *,
    session_factory: Callable[[], Session] = DatabaseSession,
) -> Any:
    """Ping every registered worker server and refresh its DB state.

    For each server: log in (registering the worker's admin user on first
    contact), update connection status, record the worker version from
    /metadata/, then ask the worker about each monitored directory and
    sync directory/plot rows accordingly.

    Returns a dict with an "info" marker and the collected console log.
    """
    # Collect ids first so each iteration can open its own short session.
    with session_manager(session_factory) as db:
        server_ids = [server.id for server in crud.server.get_multi(db)[1]]
    log_collector = ConsoleLogCollector()
    for server_id in server_ids:
        with session_manager(session_factory) as db:
            server = crud.server.get(db, id=server_id)
            if server is None:
                raise RuntimeError(
                    f"Can not find a server data with id {server_id} in a database"
                )
            server_data = schemas.ServerReturn.from_orm(server)
            _, directory_objects = crud.directory.get_multi_by_server(db, server=server)
            # Maps directory location string -> directory id.
            directories: dict[str, uuid.UUID] = {
                schemas.DirectoryReturn.from_orm(directory).location: directory.id
                for directory in directory_objects
            }
            # hostname may carry an explicit port ("host:port"); keep host only.
            host = server_data.hostname.split(":")[0]
            worker_password = server.worker_password
            worker_port = server.worker_port
        uri = f"http://{host}:{worker_port}"
        # Connect to server and get a token
        try:
            log_collector.update_log(
                stdout=f"\nPOST {uri}/login/access-token/\n".encode("utf8")
            )
            login_responce = requests.post(
                f"{uri}/login/access-token/",
                data={"username": "******", "password": worker_password},
            )
        except requests.exceptions.ConnectionError:
            # Worker unreachable: mark failed and move on to the next server.
            log_collector.update_log(
                stdout=f"\n Can not connect to {uri} \n".encode("utf8")
            )
            with session_manager(session_factory) as db:
                server = crud.server.get(db, id=server_id)
                if server is None:
                    raise RuntimeError(f"Server with id {server_id} has gone away")
                server = crud.server.update(
                    db, db_obj=server, obj_in={"status": "failed"}
                )
            continue
        else:
            log_collector.update_log(stdout=login_responce.content)
        if not login_responce.ok:
            # First contact: try to register the admin user, then log in again.
            register_request = requests.post(
                f"{uri}/user/",
                json={"nickname": "admin", "password": worker_password},
            )
            log_collector.update_log(
                stdout=f"\nPOST {register_request.url}\n".encode("utf8")
            )
            log_collector.update_log(stdout=register_request.content)
            if not register_request.ok:
                with session_manager(session_factory) as db:
                    server = crud.server.get(db, id=server_id)
                    if server is None:
                        raise RuntimeError(
                            f"Server with id {server_id} has gone away"
                        )
                    server = crud.server.update(
                        db, db_obj=server, obj_in={"status": "failed"}
                    )
                continue
            login_responce = requests.post(
                f"{uri}/login/access-token/",
                data={"username": "******", "password": worker_password},
            )
            if not login_responce.ok:
                raise RuntimeError(
                    "Unexpected register: "
                    "registration was successful, "
                    "but login is impossible"
                )
        with session_manager(session_factory) as db:
            server = crud.server.get(db, id=server_id)
            if server is None:
                raise RuntimeError(f"Server with id {server_id} has gone away")
            server = crud.server.update(
                db, db_obj=server, obj_in={"status": "connected"}
            )
        token_data = schemas.Token(**login_responce.json())
        auth_headers = {"Authorization": f"Bearer {token_data.access_token}"}
        # Try to load metadata (404 means a pre-0.1.0 worker without the route).
        metadata_responce = requests.get(f"{uri}/metadata/", headers=auth_headers)
        log_collector.update_log(
            stdout=f"\nGET {metadata_responce.url}\n".encode("utf8")
        )
        log_collector.update_log(stdout=metadata_responce.content)
        with session_manager(session_factory) as db:
            server = crud.server.get(db, id=server_id)
            if server is None:
                raise RuntimeError(f"Server with id {server_id} has gone away")
            if metadata_responce.status_code == 404:
                server = crud.server.update(
                    db, db_obj=server, obj_in={"worker_version": "< 0.1.0"}
                )
            elif metadata_responce.status_code == 200:
                server = crud.server.update(
                    db,
                    db_obj=server,
                    obj_in={"worker_version": metadata_responce.json()["version"]},
                )
            else:
                server = crud.server.update(
                    db, db_obj=server, obj_in={"worker_version": "undefined"}
                )
            # Ask the worker for info about every monitored directory.
            directories_request = requests.post(
                f"{uri}/directories/",
                json=jsonable_encoder(list(directories.keys())),
                headers=auth_headers,
            )
            log_collector.update_log(
                stdout=f"\nPOST {directories_request.url}\n".encode("utf8")
            )
            log_collector.update_log(stdout=directories_request.content)
            if not directories_request.ok:
                server = crud.server.get(db, id=server_id)
                if server is None:
                    raise RuntimeError(f"Server with id {server_id} has gone away")
                server = crud.server.update(
                    db,
                    db_obj=server,
                    obj_in={"worker_version": f"{server.worker_version} (unsupported)"},
                )
        with session_manager(session_factory) as db:
            try:
                # ``res and DirInfo(**res)`` keeps None for directories the
                # worker could not inspect.
                dir_data: dict[pathlib.Path, Optional[schemas.DirInfo]] = {
                    pathlib.Path(path): res and schemas.DirInfo(**res)
                    for path, res in directories_request.json().items()
                }
            except Exception:
                # Unparseable response: mark all this server's dirs failed.
                for directory_id in directories.values():
                    directory = crud.directory.get(db, id=directory_id)
                    if directory is not None:
                        crud.directory.update(
                            db, db_obj=directory, obj_in={"status": "failed"}
                        )
                continue
            log_collector.update_log(stdout=f"\n{directories}".encode("utf8"))
            for loc, data in dir_data.items():
                log_collector.update_log(stdout=f"\n{loc}".encode("utf8"))
                # NOTE(review): lookup uses loc.name (basename) against a
                # map keyed by location strings — presumably the worker
                # responds with directory names; verify against the worker.
                if loc.name not in directories:
                    continue
                directory = crud.directory.get(db, id=directories[loc.name])
                if directory is None:
                    continue
                if data is None:
                    directory = crud.directory.update(
                        db, db_obj=directory, obj_in={"status": "failed"}
                    )
                    continue
                directory = crud.directory.update(
                    db,
                    db_obj=directory,
                    obj_in={
                        "status": "monitoring",
                        "disk_size": data.disk_size and data.disk_size.total,
                        "disk_taken": data.disk_size and data.disk_size.used,
                    },
                )
                # Sync plot rows reported by the worker for this directory.
                for plot in data.plots:
                    plot_obj = crud.plot.get_by_name(db, name=plot.name)
                    if plot_obj is None:
                        crud.plot.create(
                            db,
                            obj_in=schemas.PlotCreate(
                                name=plot.name,
                                created_queue_id=plot.queue,
                                located_directory_id=directory.id,
                                status=schemas.PlotStatus.PLOTTING
                                if plot.plotting
                                else schemas.PlotStatus.PLOTTED,
                            ),
                        )
                    else:
                        # Promote PLOTTING -> PLOTTED once the worker stops
                        # reporting the plot as in progress.
                        plot_obj = crud.plot.update(
                            db,
                            db_obj=plot_obj,
                            obj_in={
                                "located_directory_id": directory.id,
                                "status": schemas.PlotStatus.PLOTTED.value
                                if plot_obj.status
                                == schemas.PlotStatus.PLOTTING.value
                                and not plot.plotting
                                else plot_obj.status,
                            },
                        )
    return {"info": "done", "console": log_collector.get()}
def transfer_scan_task(
    self: celery.Task,
    *,
    session_factory: Callable[[], Session] = DatabaseSession,
) -> Any:
    """Scan every plot queue and drive its plotting state machine.

    Queues without a bound execution are started on the worker (when
    autoplot is on or plotting never started); queues with one are polled
    and transitioned to WAITING/PAUSED/FAILED/PLOTTING based on the
    worker's reply.

    Returns a dict with an "info" marker and the collected console log.
    """
    # Collect ids first so each iteration can open its own short session.
    with session_manager(session_factory) as db:
        plot_queue_ids = [
            plot_queue.id for plot_queue in crud.plot_queue.get_multi(db)[1]
        ]
    log_collector = ConsoleLogCollector()
    for plot_queue_id in plot_queue_ids:
        with session_manager(session_factory) as db:
            plot_queue = crud.plot_queue.get(db, id=plot_queue_id)
            if plot_queue is None:
                raise RuntimeError(
                    f"Can not find a plot queue with id {plot_queue_id} in a database"
                )
            # Copy everything needed after the session closes into locals.
            server_data = schemas.ServerReturn.from_orm(plot_queue.server)
            final_dir = plot_queue.final_dir.location
            temp_dir = plot_queue.temp_dir.location
            pool_key = plot_queue.server.pool_key
            farmer_key = plot_queue.server.farmer_key
            plots_amount = plot_queue.plots_amount
            k = plot_queue.k
            threads = plot_queue.threads
            ram = plot_queue.ram
            execution_id = plot_queue.execution_id
            autoplot = plot_queue.autoplot
            plotting_started = plot_queue.plotting_started
            # hostname may carry an explicit port ("host:port"); keep host only.
            host = server_data.hostname.split(":")[0]
            worker_password = plot_queue.server.worker_password
            worker_port = plot_queue.server.worker_port
        plotting_data = schemas.PlottingData(
            final_dir=final_dir,
            temp_dir=temp_dir,
            queue_id=plot_queue_id,
            pool_key=pool_key,
            farmer_key=farmer_key,
            plots_amount=plots_amount,
            k=k,
            threads=threads,
            ram=ram,
        )
        login_responce = requests.post(
            f"http://{host}:{worker_port}/login/access-token/",
            data={
                "username": "******",
                "password": worker_password
            },
        )
        log_collector.update_log(
            stdout=f"\nPOST {login_responce.url}\n".encode("utf8"))
        log_collector.update_log(stdout=login_responce.content)
        if not login_responce.ok:
            # Can't authenticate against this worker — skip the queue.
            continue
        token_data = schemas.Token(**login_responce.json())
        auth_headers = {"Authorization": f"Bearer {token_data.access_token}"}
        if execution_id is None:
            # No execution bound: start plotting when autoplot is on or
            # plotting has never been started for this queue.
            if autoplot or plotting_started is None:
                responce = requests.post(
                    f"http://{host}:{worker_port}/plotting/",
                    headers=auth_headers,
                    json=jsonable_encoder(plotting_data),
                )
                log_collector.update_log(
                    stdout=f"\nPOST {responce.url}\n".encode("utf8"))
                log_collector.update_log(stdout=responce.content)
                with session_manager(session_factory) as db:
                    plot_queue = crud.plot_queue.get(db, id=plot_queue_id)
                    if not responce.ok:
                        crud.plot_queue.update(
                            db,
                            db_obj=plot_queue,
                            obj_in={
                                "status": schemas.PlotQueueStatus.FAILED.value
                            },
                        )
                    else:
                        plotting_return = schemas.PlottingReturn(
                            **responce.json())
                        plot_queue = crud.plot_queue.update(
                            db,
                            db_obj=plot_queue,
                            obj_in={
                                "status": schemas.PlotQueueStatus.PLOTTING.value,
                                "plotting_started": datetime.utcnow(),
                                "execution_id": plotting_return.id,
                            },
                        )
        else:
            # Execution already bound: poll its status on the worker.
            responce = requests.get(
                f"http://{host}:{worker_port}/plotting/{execution_id}/",
                headers=auth_headers,
            )
            log_collector.update_log(
                stdout=f"\nGET {responce.url}\n".encode("utf8"))
            log_collector.update_log(stdout=responce.content)
            with session_manager(session_factory) as db:
                plot_queue = crud.plot_queue.get(db, id=plot_queue_id)
                if not responce.ok:
                    crud.plot_queue.update(
                        db,
                        db_obj=plot_queue,
                        obj_in={
                            "status": schemas.PlotQueueStatus.FAILED.value
                        },
                    )
                else:
                    plotting_data = schemas.PlottingReturn(**responce.json())
                    if plotting_data.finished:
                        # NOTE for some reason chia plot create command exits
                        # with -1 on success (instead of 0)
                        if plotting_data.status_code == -1:
                            # Success: re-queue (WAITING) under autoplot,
                            # otherwise pause; unbind the finished execution.
                            crud.plot_queue.update(
                                db,
                                db_obj=plot_queue,
                                obj_in={
                                    "status":
                                    schemas.PlotQueueStatus.WAITING.value
                                    if autoplot else
                                    schemas.PlotQueueStatus.PAUSED.value,
                                    "execution_id": None,
                                },
                            )
                        else:
                            crud.plot_queue.update(
                                db,
                                db_obj=plot_queue,
                                obj_in={
                                    "status":
                                    schemas.PlotQueueStatus.FAILED.value
                                },
                            )
                    else:
                        # Still running.
                        plot_queue = crud.plot_queue.update(
                            db,
                            db_obj=plot_queue,
                            obj_in={
                                "status":
                                schemas.PlotQueueStatus.PLOTTING.value
                            },
                        )
    return {"info": "done", "console": log_collector.get()}