async def main(subnet_tag='testnet'):
    package = await vm.repo(
        image_hash="746f7d86703c932d83809bf3ecc517450c5e23d693c9788e85b9682e",
        min_mem_gib=1,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            ctx.send_file('fire.sh', '/golem/work/fire.sh')
            ctx.run('/bin/sh', '/golem/work/fire.sh')
            output_file = 'vim'
            ctx.download_file('/golem/work/vim/src/vim', 'vim')
            ctx.download_file('/golem/work/log.txt', 'log')
            yield ctx.commit()
            task.accept_task(result=output_file)
        ctx.log('done!')

    tasks: range = range(0, 1, 1)

    async with Engine(
        package=package,
        max_workers=1,
        budget=10.0,
        timeout=timedelta(minutes=10),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, [Task(data=task) for task in tasks]):
            print(f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m')
async def main():
    package = await vm.repo(
        image_hash="f3484f325c29f6dc3a858865738454889d5c2c476f501cb1d67db94f",
        min_mem_gib=1,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        ctx.send_file("./program.py", "/golem/work/program.py")
        async for task in tasks:
            ctx.begin()
            ctx.run("/golem/entrypoints/run.sh")
            ctx.download_file("/golem/output/results.txt", "results.txt")
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(msg='invalid file')
            task.accept_task()
        ctx.log("no more tasks!")

    # TODO: make this dynamic, e.g. depending on the size of files to transfer.
    # Worst-case time overhead for initialization, e.g. negotiation, file transfer etc.
    # init_overhead: timedelta = timedelta(minutes=3)

    async with Engine(
        package=package,
        max_workers=1,
        budget=20.0,
        timeout=timedelta(minutes=10),
        subnet_tag="testnet",
    ) as engine:
        async for progress in engine.map(worker, [Task(data=0)]):
            print("progress=", progress)
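# Each `main(...)` coroutine in this section is meant to be driven from a small
# synchronous entry point. A minimal runner sketch, modelled on the pattern used
# by yapapi 0.x examples (the `run_example` name and error handling here are
# illustrative assumptions, not part of the original snippets):

import asyncio
from yapapi.log import enable_default_logger

def run_example(coro):
    # Enable yapapi's default logger, then drive the coroutine to completion,
    # cancelling it cleanly on Ctrl-C or failure.
    enable_default_logger()
    loop = asyncio.get_event_loop()
    task = loop.create_task(coro)
    try:
        loop.run_until_complete(task)
    except (Exception, KeyboardInterrupt) as e:
        print(e)
        task.cancel()
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            pass

# usage: run_example(main(subnet_tag="testnet"))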
async def main(subnet_tag='testnet'):
    package = await vm.repo(
        image_hash='c33ebc0aee1109c6f9d1c02aa67b5800edd6ebe8be2bdc24b85bc4a8',
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            ctx.send_file('make.sh', '/golem/work/make.sh')
            ctx.run('/bin/sh', '/golem/work/make.sh')
            output_file = 'vim'
            ctx.download_file('/golem/work/vim/src/vim', 'vim')
            ctx.download_file('/golem/work/out.txt', 'log')
            yield ctx.commit()
            task.accept_task(result=output_file)
        ctx.log('VIM compiled!')

    tasks: range = range(0, 1, 1)

    async with Engine(
        package=package,
        max_workers=1,
        budget=10.0,
        timeout=timedelta(minutes=10),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, [Task(data=task) for task in tasks]):
            print(f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m')
async def main(duration, time_delta, num_trajectories=20, num_nodes=4, subnet_tag="devnet-alpha.2"):
    package = await vm.repo(
        image_hash="cea36374b451274ac584f747fcf876ac15865ee319ebb4e7eea94c23",
        min_mem_gib=0.5,
        min_storage_gib=5.0,
    )
    print(duration, time_delta, num_trajectories, num_nodes)

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            node_no = task.data
            ctx.send_file("lorenz.py", "/golem/work/task.py")
            cmd = (
                f"python3 /golem/work/task.py -d {duration} -n {node_no}/{num_nodes} "
                f"-m {num_trajectories} -l {time_delta}"
            )
            print(f"\033[36;1mRunning {cmd}\033[0m")
            ctx.run("sh", "-c", f"{cmd} >> /golem/output/log.txt 2>&1")
            ctx.download_file("/golem/output/log.txt", "log.txt")
            task_duration = duration / num_nodes
            # frame start/stop indices for this node
            t0 = int(task_duration / time_delta) * (node_no - 1)
            t1 = t0 + int(task_duration / time_delta)
            for t in range(t0, t1):
                ctx.download_file(f"/golem/output/frame_{t:04d}.png", f"output/frame_{t:04d}.png")
            yield ctx.commit()
            task.accept_task()
        ctx.log("no more tasks to run")

    init_overhead: timedelta = timedelta(minutes=10)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=num_nodes,
        budget=100.0,
        timeout=init_overhead + timedelta(minutes=num_nodes * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(),
    ) as engine:
        async for task in engine.map(worker, [Task(data=i + 1) for i in range(num_nodes)]):
            print(f"\033[36;1mTask computed: {task}, result: {task.output}\033[0m")

    print("\033[36;1mConverting png frames to a gif animation!\033[0m")
    i = "output/frame_%04d.png"
    o = "output.gif"
    subprocess.call(f"ffmpeg -i {i} {o}", shell=True)
async def main(args):
    package = await vm.repo(
        image_hash="e34aaa2aac52cc98993b7d8fdbdcbafc9ba00aa2f38e7fa87796f7b7",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            (filename, offset, secondFrequency, smoothness) = task.data
            full_filename = filename + ".png"
            # Send these files so we don't have to update the Docker image
            ctx.send_file('graphWavePair.py', f'{GOLEM_WORKDIR}graphWavePair.py')
            ctx.run('chmod', 'u+x', f'{GOLEM_WORKDIR}graphWavePair.py')
            ctx.run('/bin/sh', '-c',
                    f'{GOLEM_WORKDIR}graphWavePair.py {filename} {offset} {secondFrequency} {smoothness}')
            ctx.download_file(f'{GOLEM_WORKDIR + full_filename}', full_filename)
            yield ctx.commit()
            task.accept_task(result=full_filename)

    async with Engine(
        package=package,
        max_workers=args.number_of_providers,
        budget=10.0,
        # timeout should be keyspace / number of providers dependent
        timeout=timedelta(minutes=25),
        subnet_tag=args.subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        # Prepare our parameters for 125 graphs
        steps = np.arange(0.0, np.pi * 4, 0.1)
        numSteps = len(steps)
        inputs = []
        for (count, step) in enumerate(steps):
            filename = "graph-%04d" % (count,)
            if step < np.pi * 2:
                distance = count / (numSteps / 2)
                secondFrequency = interpolate(10, 16, distance)
                smoothness = interpolate(10, 3, distance)
            else:
                distance = (count - numSteps / 2) / (numSteps / 2)
                secondFrequency = interpolate(16, 10, distance)
                smoothness = interpolate(3, 10, distance)
            inputs.append((filename, step, secondFrequency, smoothness))

        async for task in engine.map(worker, [Task(data=graphInput) for graphInput in inputs]):
            print(
                f"{utils.TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.output}"
                f"{utils.TEXT_COLOR_DEFAULT}"
            )
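# `interpolate` and `GOLEM_WORKDIR` above come from elsewhere in the original
# script. A minimal sketch under assumed semantics (`GOLEM_WORKDIR` must end
# with a slash, since paths are built by plain concatenation):

GOLEM_WORKDIR = "/golem/work/"  # assumed value

def interpolate(start, end, distance):
    # Linear interpolation from `start` to `end` as `distance` goes 0.0 -> 1.0.
    return start + (end - start) * distance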
async def main(args):
    write_hash(args.hash)
    package = await vm.repo(
        image_hash='294668fd31f6535e65531fc4ea9d72e1377468f32ac8999f9cec89b3',
        min_mem_gib=1,
        min_storage_gib=2.0,
    )
    tasks: range = range(1, args.number_of_providers + 1)

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            ctx.send_file('in.hash', '/golem/work/in.hash')
            ctx.send_file('crack.sh', '/golem/work/crack.sh')
            ctx.run('touch', '/golem/work/out.txt')
            ctx.run('/bin/sh', '/golem/work/crack.sh', f'{task.data}', f'{args.number_of_providers}')
            log_file = f'logs/log_{task.data}.txt'
            ctx.download_file('/golem/work/log.txt', log_file)
            output_file = f'outputs/out_{task.data}.txt'
            ctx.download_file('/golem/work/out.txt', output_file)
            yield ctx.commit()
            task.accept_task(result=output_file)

    password = None
    async with Engine(
        package=package,
        max_workers=args.number_of_providers,
        budget=10.0,
        timeout=timedelta(minutes=10),
        subnet_tag=args.subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, [Task(data=task) for task in tasks]):
            print(f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m')
            password = read_password(task.data)
            if password:
                break

    if password is None:
        print(f"{utils.TEXT_COLOR_RED}No password found{utils.TEXT_COLOR_DEFAULT}")
    else:
        print(
            f"{utils.TEXT_COLOR_GREEN}"
            f"Password found: {password}"
            f"{utils.TEXT_COLOR_DEFAULT}"
        )
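# `write_hash` and `read_password` are helpers defined elsewhere in this
# example. A plausible sketch, assuming the target hash is written to
# `in.hash` (the file sent to providers above) and each provider's output
# lands in `outputs/out_<n>.txt`:

def write_hash(hash_value):
    # Persist the hash to crack so it can be sent to providers as `in.hash`.
    with open('in.hash', 'w') as f:
        f.write(hash_value)

def read_password(node_no):
    # Return the password recovered by provider `node_no`, or None if its
    # output file is empty.
    with open(f'outputs/out_{node_no}.txt') as f:
        content = f.read().strip()
    return content or None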
async def main():
    package = await vm.repo(
        image_hash="27c5138d399068738471e5f63b5a8161ba57b490b5cf12d0bd7219cc",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        ctx.send_file("./scene.blend", "/golem/resource/scene.blend")
        async for task in tasks:
            frame = task.data
            ctx.begin()
            crops = [{
                "outfilebasename": "out",
                "borders_x": [0.0, 1.0],
                "borders_y": [0.0, 1.0],
            }]
            ctx.send_json(
                "/golem/work/params.json",
                {
                    "scene_file": "/golem/resource/scene.blend",
                    "resolution": (800, 600),
                    "use_compositing": False,
                    "crops": crops,
                    "samples": 100,
                    "frames": [frame],
                    "output_format": "PNG",
                    "RESOURCES_DIR": "/golem/resources",
                    "WORK_DIR": "/golem/work",
                    "OUTPUT_DIR": "/golem/output",
                },
            )
            ctx.run("/golem/entrypoints/run-blender.sh")
            ctx.download_file(f"/golem/output/out{frame:04d}.png", f"output_{frame}.png")
            yield ctx.commit()
            # TODO: Check if job is valid
            # and reject by: task.reject_task(msg='invalid file')
            task.accept_task()
        ctx.log("no more frames to render")

    async with Engine(
        package=package,
        max_workers=10,
        budget=10.0,
        timeout=timedelta(minutes=15),
        subnet_tag="R2",
    ) as engine:
        async for progress in engine.map(worker, [Task(data=frame) for frame in range(0, 50, 15)]):
            print("progress=", progress)
async def main(args):
    package = await vm.repo(
        image_hash='8af41741dc1a15fb85d519dce2b42e994ba2494acd4c71a96c5029d2',
        min_mem_gib=1,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            output_file_name = f'{task.data}.png'
            output_file_path = f'/golem/work/{output_file_name}'
            ctx.send_file('plot.sh', '/golem/work/plot.sh')
            command = (
                '/bin/sh',
                '/golem/work/plot.sh',
                task.data,
                data_file,
                country_codes_file,
                output_file_path,
                args.parameter,
            )
            print(*command)
            ctx.run(*command)
            # Queue the download before committing so it executes as part of
            # this batch; steps queued after the commit would only run with
            # the next one.
            ctx.download_file(output_file_path, f'outputs/{output_file_name}')
            yield ctx.commit()
            task.accept_task(result=output_file_name)

    async with Engine(
        package=package,
        max_workers=args.workers,
        budget=10.0,
        timeout=timedelta(minutes=10),
        subnet_tag=args.subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        days = dates(args.start, args.end)
        async for task in engine.map(worker, [Task(data=date) for date in days]):
            print(f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m')

    gif_name = './covid.gif'
    generate_gif(gif_name)
    print(f'{utils.TEXT_COLOR_GREEN}'
          f'gif generated: {gif_name}'
          f'{utils.TEXT_COLOR_DEFAULT}')
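# `dates`, `generate_gif`, `data_file` and `country_codes_file` above are
# defined elsewhere in the original script. A rough sketch of the two helpers
# under assumed semantics (ISO date strings in, one png plot per day out):

import glob
from datetime import date, timedelta

import imageio

def dates(start, end):
    # Yield ISO date strings from `start` to `end`, inclusive.
    current, last = date.fromisoformat(start), date.fromisoformat(end)
    while current <= last:
        yield current.isoformat()
        current += timedelta(days=1)

def generate_gif(gif_name):
    # Stitch the downloaded daily plots into an animated gif.
    frames = [imageio.imread(path) for path in sorted(glob.glob('outputs/*.png'))]
    imageio.mimsave(gif_name, frames)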
async def main(args):
    with open(".image.hash") as f:
        hash = f.read()
    package = await vm.repo(
        image_hash=hash,
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        tasks_count = 0
        async for task in tasks:
            frames = [['0', '1', '2'], ['3', '4']][task.data]
            framesStr = ' '.join(frames)
            ctx.run(
                "/bin/bash", "-c",
                f"cd {GOLEM_WORKDIR} && /root/plot2.py {framesStr} > {GOLEM_WORKDIR}log.txt 2>&1"
            )
            # the log only needs to be fetched once per task, not once per frame
            ctx.download_file(f"{GOLEM_WORKDIR}log.txt", f"log{task.data}.txt")
            for frame in frames:
                ctx.download_file(f"{GOLEM_WORKDIR}discontinuous{frame}.png", f"discontinuous{frame}.png")
            yield ctx.commit()
            task.accept_task()
            tasks_count += 1
        # if tasks_count < 2:
        #     raise "Need at least 2 tasks!"

    async with Engine(
        package=package,
        max_workers=args.number_of_providers,
        budget=10.0,
        # timeout should be keyspace / number of providers dependent
        timeout=timedelta(minutes=25),
        subnet_tag=args.subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        inputs = range(2)
        async for task in engine.map(worker, [Task(data=graphInput) for graphInput in inputs]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
async def main():
    # get number of partitions
    data_path = Path(DATA_PATH)
    num_files = len(list(data_path.glob("*")))
    partitions = math.floor(num_files / PARTITION_SLICES)
    logger.info(f"{partitions=}")

    # calc timeout
    init_overhead: timedelta = timedelta(minutes=OVERHEAD_MINUTES)
    timeout = init_overhead + timedelta(minutes=partitions * 2)

    # get latest pushed package
    with open("hash_link", "r") as f:
        hash_link = f.read()

    package = await vm.repo(
        image_hash=hash_link.strip(),
        min_mem_gib=MIN_MEM_GB,
        min_storage_gib=MIN_STORAGE_GB,
    )

    Path("tmp/").mkdir(exist_ok=True)  # tolerate leftovers from a previous run

    start_time = time()

    # start worker
    async with Engine(
        package=package,
        max_workers=MAX_WORKERS,
        budget=BUDGET,
        timeout=timeout,
        subnet_tag=SUBNET,
    ) as engine:
        async for progress in engine.map(worker, [Task(data=partition) for partition in range(0, partitions)]):
            logger.info(f"{progress=}")

    logger.info(f"Execution time: {time() - start_time}")
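# This script reads its configuration from module-level constants and a
# `worker` coroutine defined elsewhere. Plausible values for the constants
# (assumed for illustration, not the original settings):

DATA_PATH = "data/"       # directory whose files are split into partitions
PARTITION_SLICES = 10     # files per partition
OVERHEAD_MINUTES = 5      # worst-case init overhead (negotiation, transfer)
MIN_MEM_GB = 0.5
MIN_STORAGE_GB = 2.0
MAX_WORKERS = 4
BUDGET = 10.0
SUBNET = "testnet"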
async def main(subnet_tag: str, node_count: int, timeout_seconds: int, password: str):
    print(f"Parameters: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}")
    if node_count < 2:
        raise Exception("Invalid node_count. There must be 2 or more nodes.")

    package = await vm.repo(
        image_hash="c6c34d6462daa7307ace83c08028461411a0e0133d4db053904a89df",
        min_mem_gib=4,
        min_storage_gib=3.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            output_file = f"out_{task.data['node']}.txt"
            ctx.run("/golem/entrypoints/crack.sh", str(task.data['node']), str(task.data['nodes']),
                    str(timeout_seconds), password)
            ctx.log("task data is:")
            ctx.log(task.data)
            ctx.download_file("/golem/output/result.txt", output_file)
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason='invalid file')
            task.accept_task(result=output_file)
        ctx.log("no more tasks to process")

    # one task per node taking part in the computation
    nodes = [Task(data={'node': i + 1, 'nodes': node_count}) for i in range(node_count)]

    init_overhead: timedelta = timedelta(minutes=10)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=node_count,
        budget=20.0,
        timeout=init_overhead + timedelta(minutes=node_count * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, nodes):
            print(
                f"{utils.TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.output}"
                f"{utils.TEXT_COLOR_DEFAULT}"
            )

    # Processing is done, so remind the user of the parameters and show the results
    print(f"Parameters: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}")
    for i in range(node_count):
        output_file = f"out_{i + 1}.txt"
        with open(output_file) as f:
            lines = f.readlines()
        found_message = f"Worker {i + 1} did not find the password"
        for line in lines:
            if "?" in line:
                found_message = (
                    f"{utils.TEXT_COLOR_YELLOW}Worker {i + 1} found the password: "
                    f"{line[2:].strip()}{utils.TEXT_COLOR_DEFAULT}"
                )
                break
        print(found_message)
async def main(subnet_tag: str, node_count: int, timeout_seconds: int, password: str):
    print(f"Stats: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}")
    if node_count < 2:
        raise Exception("Invalid node_count. There must be 2 or more nodes.")
    else:
        print("Running with 2 or more nodes")

    package = await vm.repo(
        image_hash="a6722779ca397520db82f354684a46a13c473aca04e38b91ec32755e",
        min_mem_gib=4,
        min_storage_gib=3.0,
    )

    async def ripper(ctx: WorkContext, tasks):
        async for task in tasks:
            output_file = f"out_{task.data['node']}.txt"
            # ctx.send_file('ripper.sh', '/golem/entrypoints/ripper.sh')  # uncomment to send your own modified script
            ctx.run('/bin/sh', '/golem/entrypoints/ripper.sh', str(task.data['node']),
                    str(task.data['nodes']), str(timeout_seconds), password)
            ctx.log("task data is:")
            ctx.log(task.data)
            ctx.download_file("/golem/output/result.txt", output_file)
            yield ctx.commit()
            task.accept_task(result=output_file)
        ctx.log("hmm.. it looks like we got some passwords")

    # one task per node taking part in the computation
    nodes = [Task(data={'node': i + 1, 'nodes': node_count}) for i in range(node_count)]

    init_overhead: timedelta = timedelta(minutes=10)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=node_count,
        budget=30.0,
        timeout=init_overhead + timedelta(minutes=node_count * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(ripper, nodes):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")

    # Processing is done, so remind the user of the parameters and show the results
    print(f"Parameters: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}")
    for i in range(node_count):
        output_file = f"out_{i + 1}.txt"
        with open(output_file) as f:
            lines = f.readlines()
        echoer_message = f"Ripper {i + 1} did not find the password"
        for line in lines:
            if "?" in line:
                echoer_message = (
                    f"{utils.TEXT_COLOR_GREEN}Worker {i + 1} found the password: "
                    f"{line[2:].strip()}{utils.TEXT_COLOR_DEFAULT}"
                )
                break
        print(echoer_message)
def test_task():
    t: Task[int, None] = Task(data=1)
    assert t.data == 1
def __wrap_in_yagna_task(self, data: Iterable):
    """Wrap each item of a data sequence in a yapapi `Task`."""
    # `Iterable` comes from `typing`; the original annotation (`data: []`)
    # was not a valid type.
    for item in data:
        yield Task(data=item)
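# Hypothetical usage of the wrapper above (names assumed): feed the wrapped
# tasks straight into an Engine's `map`, e.g.
#
#     async for done in engine.map(worker, self.__wrap_in_yagna_task(my_data)):
#         print(done.output)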
async def main(subnet_tag: str):
    package = await vm.repo(
        image_hash="9a3b5d67b0b27746283cb5f287c13eab1beaa12d92a9f536b747c7ae",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        scene_path = str(script_dir / "cubes.blend")
        ctx.send_file(scene_path, "/golem/resource/scene.blend")
        async for task in tasks:
            frame = task.data
            crops = [{
                "outfilebasename": "out",
                "borders_x": [0.0, 1.0],
                "borders_y": [0.0, 1.0],
            }]
            ctx.send_json(
                "/golem/work/params.json",
                {
                    "scene_file": "/golem/resource/scene.blend",
                    "resolution": (400, 300),
                    "use_compositing": False,
                    "crops": crops,
                    "samples": 100,
                    "frames": [frame],
                    "output_format": "PNG",
                    "RESOURCES_DIR": "/golem/resources",
                    "WORK_DIR": "/golem/work",
                    "OUTPUT_DIR": "/golem/output",
                },
            )
            ctx.run("/golem/entrypoints/run-blender.sh")
            output_file = f"output_{frame}.png"
            ctx.download_file(f"/golem/output/out{frame:04d}.png", output_file)
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason='invalid file')
            task.accept_task(result=output_file)
        ctx.log("no more frames to render")

    # iterator over the frame indices that we want to render
    frames: range = range(0, 60, 10)

    # TODO: make this dynamic, e.g. depending on the size of files to transfer.
    # Worst-case time overhead for initialization, e.g. negotiation, file transfer etc.
    init_overhead: timedelta = timedelta(minutes=3)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=3,
        budget=10.0,
        timeout=init_overhead + timedelta(minutes=len(frames) * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, [Task(data=frame) for frame in frames]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
async def main():
    package = await vm.repo(
        image_hash="9a3b5d67b0b27746283cb5f287c13eab1beaa12d92a9f536b747c7ae",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    # Fetch the scene file from IPFS before distributing it to providers
    url = 'https://hub.textile.io/ipfs/bafybeihpjl5e3vthvu6y33um4kuw6labeshl5utky556pg2tah4ngflvve'
    r = requests.get(url)
    with open('./cubes.blend', 'wb') as f:
        f.write(r.content)

    async def worker(ctx: WorkContext, tasks):
        ctx.send_file("./cubes.blend", "/golem/resource/scene.blend")
        async for task in tasks:
            frame = task.data
            ctx.begin()
            crops = [{
                "outfilebasename": "out",
                "borders_x": [0.0, 1.0],
                "borders_y": [0.0, 1.0],
            }]
            ctx.send_json(
                "/golem/work/params.json",
                {
                    "scene_file": "/golem/resource/scene.blend",
                    "resolution": (400, 300),
                    "use_compositing": False,
                    "crops": crops,
                    "samples": 100,
                    "frames": [frame],
                    "output_format": "PNG",
                    "RESOURCES_DIR": "/golem/resources",
                    "WORK_DIR": "/golem/work",
                    "OUTPUT_DIR": "/golem/output",
                },
            )
            ctx.run("/golem/entrypoints/run-blender.sh")
            ctx.download_file(f"/golem/output/out{frame:04d}.png", f"output_{frame}.png")
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(msg='invalid file')
            task.accept_task()
        ctx.log("no more frames to render")

    # iterator over the frame indices that we want to render
    frames: range = range(0, 60, 10)

    # TODO: make this dynamic, e.g. depending on the size of files to transfer.
    # Worst-case time overhead for initialization, e.g. negotiation, file transfer etc.
    init_overhead: timedelta = timedelta(minutes=3)

    async with Engine(
        package=package,
        max_workers=3,
        budget=10.0,
        timeout=init_overhead + timedelta(minutes=len(frames) * 2),
        subnet_tag="testnet",
    ) as engine:
        async for progress in engine.map(worker, [Task(data=frame) for frame in frames]):
            print("progress=", progress)
async def inner(*args, **kwargs):
    # First, save the function body to a file
    tmpdir = tempfile.TemporaryDirectory()
    module_path = PurePath(f"{tmpdir.name}/gfaas_module")
    with open(module_path, "wb") as f:
        marshal.dump(func.__code__, f)

    if self.run_local:
        import asyncio

        fut = self.engine.submit(_local_remote_fn, module_path, *args)
        res = await asyncio.wait_for(asyncio.wrap_future(fut), self.timeout.seconds)
        return res
    else:
        from yapapi.runner import Engine, Task, vm
        from yapapi.runner.ctx import WorkContext
        from yapapi.log import enable_default_logger, log_summary

        # Save input args to files
        saved_args = []
        for i, arg in enumerate(args):
            arg_path = PurePath(f"{tmpdir.name}/arg{i}")
            with open(arg_path, "w") as f:
                json.dump(arg, f)
            saved_args.append(arg_path)

        enable_default_logger()
        package = await vm.repo(
            image_hash="74e9cdb5a5aa2c73a54f9ebf109986801fe2d4f026ea7d9fbfcca221",
            min_mem_gib=0.5,
            min_storage_gib=2.0,
        )
        out_path = PurePath(f"{tmpdir.name}/out")

        async def worker(ctx: WorkContext, tasks):
            async for task in tasks:
                ctx.send_file(module_path, "/golem/input/func")
                remote_args = []
                for (i, arg_path) in enumerate(saved_args):
                    remote_arg = f"/golem/input/arg{i}"
                    ctx.send_file(arg_path, remote_arg)
                    remote_args.append(remote_arg)
                ctx.run("python", "/golem/runner.py", "/golem/input/func", *remote_args)
                ctx.download_file("/golem/output/out", out_path)
                yield ctx.commit()
                task.accept_task(result=out_path)
            ctx.log("done")

        init_overhead: timedelta = timedelta(minutes=3)

        async with Engine(
            package=package,
            max_workers=1,
            budget=self.budget,
            timeout=init_overhead + self.timeout,
            subnet_tag=self.subnet,
            event_emitter=log_summary(),
        ) as engine:
            async for progress in engine.map(worker, [Task(data=None)]):
                print(f"progress={progress}")

        with open(out_path, "r") as f:
            out = json.load(f)
        return out
async def main(subnet_tag: str):
    package = await vm.repo(
        image_hash="83b5ebab52f39e676173de32f56cf2648c136050b8fa1f31a791c467",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        ctx.send_file("./lens.py", "/golem/work/lens.py")
        async for task in tasks:
            feed = task.data
            ctx.send_json("/golem/work/params.json", feed)
            commands = "python3 /golem/work/lens.py >> /golem/output/task-log 2>&1;"
            ctx.run("/bin/sh", "-c", commands)
            frame_start = feed["start_frame"]
            frame_end = feed["start_frame"] + len(feed["points"])
            frames = range(frame_start, frame_end)
            ctx.log(f"Downloading frames {frame_start}-{frame_end}...")
            for frame in frames:
                ctx.download_file(f"/golem/output/{frame}.png", f"out/{frame + 100}.png")
            output = "task-log"
            ctx.download_file("/golem/output/task-log", f"out/{output}")
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason='invalid file')
            task.accept_task(result=output)
        ctx.log("no more frames to render")

    points = np.arange(0.001, 1.0, 0.005)
    feeds = []
    for i in range(len(points) // 10):
        feed = {
            "start_frame": 10 * i,
            # use a distinct loop variable: the original comprehension reused
            # `i`, which works in Python 3 but shadows the outer index
            "points": [points[j] for j in range(10 * i, 10 * (i + 1))],
        }
        feeds.append(feed)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=8,
        budget=100.0,
        timeout=timedelta(minutes=3) + timedelta(minutes=10),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, [Task(data=feed) for feed in feeds]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
async def main(subnet_tag: str, presets: str, input_file: str, job: str):
    package = await vm.repo(
        image_hash="9a6b2409ccd96b3352b3fcdc6e1e568bdfa4d4e258589cfaff084786",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )
    preset_list = list(filter(str.strip, presets.split(",")))

    async def worker(ctx: WorkContext, tasks):
        sent_file = "/golem/resource/input.file"
        ctx.send_file(input_file, sent_file)
        async for task in tasks:
            preset = task.data
            if "MKV" in preset or preset == "Roku 2160p60 4K HEVC Surround":
                output_ext = ".mkv"
            else:
                output_ext = ".mp4"
            subjob = f"{job}_{preset_list.index(preset) + 1}"
            update_status(subjob, "starting")
            output_file = f"{subjob}{output_ext}"
            ctx.log(f"job: {subjob}")
            update_status(subjob, "job sent")
            commands = (
                'cd /golem/output; '
                f'echo preset:{preset} > log.txt; '
                f'echo output_file:{output_file} >> log.txt; '
                f"HandBrakeCLI -i {sent_file} -o {output_file} --preset '{preset}' >> log.txt 2>&1; "
                'ls -lah >> log.txt'
            )
            ctx.run("/bin/sh", "-c", commands)
            # ctx.download_file("/golem/output/log.txt", "log.txt")
            ctx.download_file(f"/golem/output/{output_file}", f"./downloads/{output_file}")
            yield ctx.commit()
            update_status(subjob, "done")
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason='invalid file')
            task.accept_task(result=output_file)
        ctx.log("no more videos to convert!")

    init_overhead: timedelta = timedelta(minutes=7)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=10,
        budget=20.0,
        timeout=init_overhead,
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:
        async for task in engine.map(worker, [Task(data=preset) for preset in preset_list]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
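# `update_status` is a helper defined elsewhere in this example, used to
# report per-subjob progress. A minimal sketch, assuming status lines are
# simply appended to a local file (the original may report elsewhere):

def update_status(subjob, status):
    # Record the latest status of a transcoding subjob.
    with open("status.log", "a") as f:
        f.write(f"{subjob}: {status}\n")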