Example #1
async def main(subnet_tag='testnet'):
    package = await vm.repo(
        image_hash='c33ebc0aee1109c6f9d1c02aa67b5800edd6ebe8be2bdc24b85bc4a8',
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
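            # Note: the ctx.* calls below only queue steps; the whole batch is
            # sent to the provider when ctx.commit() is yielded further down.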
            ctx.send_file('make.sh', '/golem/work/make.sh')
            ctx.run('/bin/sh', '/golem/work/make.sh')
            output_file = 'vim'
            ctx.download_file('/golem/work/vim/src/vim', 'vim')
            ctx.download_file('/golem/work/out.txt', 'log')
            yield ctx.commit()
            task.accept_task(result=output_file)

        ctx.log(f'VIM compiled!')

    tasks: range = range(0, 1, 1)

    async with Engine(
        package=package,
        max_workers=1,
        budget=10.0,
        timeout=timedelta(minutes=10),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(worker, [Task(data=task) for task in tasks]):
            print(f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m')
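These snippets all define a main coroutine but omit the program entry point. A minimal sketch of how such a coroutine is typically launched (the subnet tag value is just this example's default):

import asyncio
from yapapi.log import enable_default_logger

if __name__ == "__main__":
    enable_default_logger()
    loop = asyncio.get_event_loop()
    task = loop.create_task(main(subnet_tag='testnet'))
    try:
        loop.run_until_complete(task)
    except KeyboardInterrupt:
        # give the engine a chance to shut down gracefully
        task.cancel()
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            pass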
Example #2
async def main(subnet_tag='testnet'):
    package = await vm.repo(
        image_hash="746f7d86703c932d83809bf3ecc517450c5e23d693c9788e85b9682e",
        min_mem_gib=1,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            ctx.send_file('fire.sh', '/golem/work/fire.sh')
            ctx.run('/bin/sh', '/golem/work/fire.sh')
            output_file = f"vim"
            ctx.download_file('/golem/work/vim/src/vim', 'vim')
            ctx.download_file('/golem/work/log.txt', 'log')
            yield ctx.commit()
            task.accept_task(result=output_file)

        ctx.log(f'done!')

    tasks: range = range(0, 1, 1)

    async with Engine(
            package=package,
            max_workers=1,
            budget=10.0,
            timeout=timedelta(minutes=10),
            subnet_tag=subnet_tag,
            event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(worker,
                                     [Task(data=task) for task in tasks]):
            print(
                f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m'
            )
Example #3
async def main(subnet_tag: str):
    package = await vm.repo(
        image_hash="7c63ebd87868e27eb99a687d1175e77450d7b3ff73fc86e6bdcef37b",
        min_mem_gib=4.0,
        min_storage_gib=8.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            ctx.run("/golem/entrypoints/runold.sh")
            output_file = "output.txt"
            ctx.download_file("/golem/output/output.txt", output_file)
            try:
                # Set a timeout for executing the script on the provider (here 18000 seconds,
                # i.e. 5 hours). If the timeout is exceeded, this worker instance will be shut
                # down and all remaining tasks, including the current one, will be computed
                # by other providers.
                yield ctx.commit(timeout=timedelta(seconds=18000))
                # TODO: Check if job results are valid
                # and reject by: task.reject_task(reason = 'invalid file')
                task.accept_result(result=output_file)
            except BatchTimeoutError:
                print(f"{utils.TEXT_COLOR_RED}"
                      f"Task timed out: {task}, time: {task.running_time}"
                      f"{utils.TEXT_COLOR_DEFAULT}")
                raise

    # Iterator over the frame indices that we want to render
    frames: range = range(0, 1)
    # Worst-case overhead, in minutes, for initialization (negotiation, file transfer etc.)
    # TODO: make this dynamic, e.g. depending on the size of files to transfer
    init_overhead = 3
    # Providers may reject work whose timeout falls outside the range they accept;
    # here the total timeout is clamped to the [30 min, 150 min] range.
    min_timeout, max_timeout = 30, 150

    timeout = timedelta(
        minutes=max(min(init_overhead +
                        len(frames) * 2, max_timeout), min_timeout))

    # By passing `event_consumer=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Executor(
        package=package,
        max_workers=3,
        budget=10.0,
        timeout=timeout,
        subnet_tag=subnet_tag,
        event_consumer=log_summary(log_event_repr),
    ) as executor:

        async for task in executor.submit(
                worker, [Task(data=frame) for frame in frames]):
            print(
                f"{utils.TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.result}, time: {task.running_time}"
                f"{utils.TEXT_COLOR_DEFAULT}")
Example #4
async def main(duration,
               time_delta,
               num_trajectories=20,
               num_nodes=4,
               subnet_tag="devnet-alpha.2"):
    package = await vm.repo(
        image_hash="cea36374b451274ac584f747fcf876ac15865ee319ebb4e7eea94c23",
        min_mem_gib=0.5,
        min_storage_gib=5.0,
    )
    print(duration, time_delta, num_trajectories, num_nodes)

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            node_no = task.data
            ctx.send_file("lorenz.py", "/golem/work/task.py")
            cmd = f"python3 /golem/work/task.py -d {duration} -n {node_no}/{num_nodes} -m {num_trajectories} -l {time_delta} "
            print(f"\033[36;1mRunning {cmd}\033[0m")
            ctx.run("sh", "-c", f"{cmd} >> /golem/output/log.txt 2>&1")
            ctx.download_file(f"/golem/output/log.txt", "log.txt")
            task_duration = duration / num_nodes
            # start and end frame indices produced by this node
            t0 = int(task_duration / time_delta) * (node_no - 1)
            t1 = t0 + int(task_duration / time_delta)
            for t in range(t0, t1):
                ctx.download_file(f"/golem/output/frame_{t:04d}.png",
                                  f"output/frame_{t:04d}.png")
            yield ctx.commit()
            task.accept_task()

        ctx.log("no more task to run")

    init_overhead: timedelta = timedelta(minutes=10)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=num_nodes,
        budget=100.0,
        timeout=init_overhead + timedelta(minutes=num_nodes * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(),
    ) as engine:
        async for task in engine.map(
                worker, [Task(data=i + 1) for i in range(num_nodes)]):
            print(
                f"\033[36;1mTask computed: {task}, result: {task.output}\033[0m"
            )

    print(f"\033[36;1mConverting ppng files to a gif animation!\033[0m")
    i = "output/frame_%04d.png"
    o = "output.gif"
    subprocess.call(f"ffmpeg  -i {i} {o}", shell=True)
Example #5
async def main(subnet_tag: str, app: App, batches: List[Batch]):
    package = await vm.repo(
        # using existing image for 'blender' example
        image_hash="9a3b5d67b0b27746283cb5f287c13eab1beaa12d92a9f536b747c7ae",
        min_mem_gib=1.0,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        cwd = pathlib.Path.cwd()
        async for task in tasks:
            batch = task.data
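            # remove any stale local output from a previous run, if present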
            try:
                os.remove(batch.local_out(cwd))
            except OSError:
                pass

            for exe in batch.all_executables():
                ctx.send_file(batch.local_exe(cwd, exe), batch.remote_exe(exe))
            batch.make_local_sh(cwd)
            ctx.send_file(batch.local_sh(cwd), batch.remote_sh())
            ctx.run("/bin/bash", batch.remote_sh())
            ctx.download_file(batch.remote_out(), batch.local_out(cwd))
            try:
                yield ctx.commit(timeout=timedelta(seconds=batch.timeout()))
                task.accept_result(result=batch.local_out(cwd))
            except BatchTimeoutError:
                print(
                    f"{text_colors.RED}"
                    f"Task timed out: {task.data.id}, time: {task.running_time}"
                    f"{text_colors.DEFAULT}")
                raise

    # Worst-case overhead, in minutes, for initialization (negotiation, file transfer etc.)
    timeout = timedelta(minutes=app.args.time_out)

    # By passing `event_consumer=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Executor(
        package=package,
        max_workers=len(batches),
        budget=10.0,
        timeout=timeout,
        subnet_tag=subnet_tag,
        event_consumer=log_summary(log_event_repr),
    ) as executor:

        async for task in executor.submit(
                worker, [Task(data=batch) for batch in batches]):
            print(
                f"{text_colors.CYAN}"
                f"Task computed: {task.data.id}, result: {task.result}, time: {task.running_time}"
                f"{text_colors.DEFAULT}")
Example #6
async def main(args):

    package = await vm.repo(
        image_hash="e34aaa2aac52cc98993b7d8fdbdcbafc9ba00aa2f38e7fa87796f7b7",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )
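    # GOLEM_WORKDIR is defined elsewhere in this example; it is assumed to be the
    # provider-side work directory path, e.g. '/golem/work/'.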

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            (filename, offset, secondFrequency, smoothness) = task.data
            full_filename = filename + ".png"
            # Send these files so we don't have to update the Docker image
            ctx.send_file('graphWavePair.py', f'{GOLEM_WORKDIR}graphWavePair.py')
            ctx.run('chmod', 'u+x', f'{GOLEM_WORKDIR}graphWavePair.py')
            ctx.run('/bin/sh', '-c', f'{GOLEM_WORKDIR}graphWavePair.py {filename} {offset} {secondFrequency} {smoothness}')
            ctx.download_file(f'{GOLEM_WORKDIR}{full_filename}', full_filename)

            yield ctx.commit()
            task.accept_task(result=full_filename)

    async with Engine(
        package=package,
        max_workers=args.number_of_providers,
        budget=10.0,
        # timeout should be keyspace / number of providers dependent
        timeout=timedelta(minutes=25),
        subnet_tag=args.subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        # Prepare our parameters for 125 graphs
        steps = np.arange(0.0, np.pi * 4, 0.1)
        numSteps = len(steps)
        inputs = []
        for (count, step) in enumerate(steps):
            filename = "graph-%04d" % (count,)
            if (step < np.pi * 2):
                distance = (count) / (numSteps / 2)
                secondFrequency = interpolate(10, 16, distance)
                smoothness = interpolate(10, 3, distance)
            else:
                distance = (count - numSteps / 2) / (numSteps / 2)
                secondFrequency = interpolate(16, 10, distance)
                smoothness = interpolate(3, 10, distance)
            inputs.append((filename, step, secondFrequency, smoothness))

        async for task in engine.map(worker, [Task(data=graphInput) for graphInput in inputs]):
            print(
                f"{utils.TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.output}"
                f"{utils.TEXT_COLOR_DEFAULT}"
            )
Example #7
 def __create_engine(self):
     """Creates yagna engine"""
     return Engine(
         package=self.package,
         max_workers=self.max_workers,
         budget=self.budget,
         timeout=timedelta(minutes=25),
         subnet_tag=self.subnet_tag,
         # By passing `event_emitter=log_summary()` we enable summary logging.
         # See the documentation of the `yapapi.log` module on how to set
         # the level of detail and format of the logged information.
         event_emitter=log_summary(log_event_repr),
     )
Example #8
async def main(args):

    write_hash(args.hash)

    package = await vm.repo(
        image_hash='294668fd31f6535e65531fc4ea9d72e1377468f32ac8999f9cec89b3',
        min_mem_gib=1,
        min_storage_gib=2.0,
    )

    tasks: range = range(1, args.number_of_providers + 1)
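    # One task per provider; each task's data is a 1-based index that crack.sh
    # receives together with the total number of providers.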

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            ctx.send_file('in.hash', '/golem/work/in.hash')
            ctx.send_file('crack.sh', '/golem/work/crack.sh')
            ctx.run('touch', '/golem/work/out.txt')
            ctx.run('/bin/sh', '/golem/work/crack.sh', f'{task.data}', f'{args.number_of_providers}')
            log_file = f'logs/log_{task.data}.txt'
            ctx.download_file('/golem/work/log.txt', log_file)
            output_file = f'outputs/out_{task.data}.txt'
            ctx.download_file('/golem/work/out.txt', output_file)
            yield ctx.commit()
            task.accept_task(result=output_file)

    password = None

    async with Engine(
        package=package,
        max_workers=args.number_of_providers,
        budget=10.0,
        timeout=timedelta(minutes=10),
        subnet_tag=args.subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(worker, [Task(data=task) for task in tasks]):
            print(f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m')

            password = read_password(task.data)
            if password:
                break

    if password is None:
        print(f"{utils.TEXT_COLOR_RED}No password found{utils.TEXT_COLOR_DEFAULT}")
    else:
        print(
            f"{utils.TEXT_COLOR_GREEN}"
            f"Password found: {password}"
            f"{utils.TEXT_COLOR_DEFAULT}"
        )
Example #9
async def main(args):

    package = await vm.repo(
        image_hash='8af41741dc1a15fb85d519dce2b42e994ba2494acd4c71a96c5029d2',
        min_mem_gib=1,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            output_file_name = f'{task.data}.png'
            output_file_path = f'/golem/work/{output_file_name}'
            ctx.send_file('plot.sh', '/golem/work/plot.sh')
            command = (
                '/bin/sh',
                '/golem/work/plot.sh',
                task.data,
                data_file,
                country_codes_file,
                output_file_path,
                args.parameter,
            )
            print(*command)
            ctx.run(*command)
            # download the plot before committing the batch, so it is part of this task's run
            ctx.download_file(output_file_path, f'outputs/{output_file_name}')
            yield ctx.commit()
            task.accept_task(result=output_file_name)

    async with Engine(
            package=package,
            max_workers=args.workers,
            budget=10.0,
            timeout=timedelta(minutes=10),
            subnet_tag=args.subnet_tag,
            event_emitter=log_summary(log_event_repr),
    ) as engine:

        days = dates(args.start, args.end)
        async for task in engine.map(worker,
                                     [Task(data=date) for date in days]):
            print(
                f'\033[36;1mTask computed: {task}, result: {task.output}\033[0m'
            )

    gif_name = './covid.gif'
    generate_gif(gif_name)

    print(f'{utils.TEXT_COLOR_GREEN}'
          f'gif generated: {gif_name}'
          f'{utils.TEXT_COLOR_DEFAULT}')
Example #10
    async def run_step(self, step, timeout=timedelta(minutes=10), budget=10, subnet_tag="community.3" ):
        package = await vm.repo(
            image_hash=step["image"],
            min_mem_gib=1,
            min_storage_gib=5.0,
        )
        async def worker(ctx: WorkContext, tasks):
            async for task in tasks:
                step_name = step['name']
                commands = step['commands']
                # environment variables for the commands, as defined in the step (if any)
                envs = step.get('environment')
                print(f"\033[36;1mSending the context zip file: {self.tar_fname}\033[0m")
                ctx.send_file(self.tar_fname, "/golem/resource/context.zip")
                # extract the zip file
                print(f"\033[36;1mExtracting the zip file: {self.tar_fname}\033[0m")
                ctx.run("/bin/sh", "-c", "unzip /golem/resource/context.zip")
                # run all commands one by one
                for command in commands:
                    print(f"\033[36;1mRunning {command}\033[0m")
                    # set envs.
                    ctx.run("/bin/sh", "-c", f"{command} >> /golem/output/cmd.log 2>&1", env=envs)
                log_fname = get_temp_log_file(step_name)
                ctx.download_file(f"/golem/output/cmd.log", log_fname)
                try:
                    yield ctx.commit(timeout=timedelta(minutes=30))
                    task.accept_result(result=log_fname)
                except BatchTimeoutError:
                    print(f"Task timed out: {task}, time: {task.running_time}")
                    raise
            ctx.log("no more task to run")

        # By passing `event_emitter=log_summary()` we enable summary logging.
        # See the documentation of the `yapapi.log` module on how to set
        # the level of detail and format of the logged information.
        async with Executor(
            package=package,
            max_workers=1,
            budget=budget,
            timeout=timeout,
            subnet_tag=subnet_tag,
            event_consumer=log_summary(log_event_repr),
        ) as executor:
            async for task in executor.submit(worker, [Task(data=step)]):
                print(f"\033[36;1mStep completed: {task}\033[0m")
                # grab the logs
                self.state[step['name']]['log'] = task.result
                # notify about this task!
                self.state[step['name']]['state'] = StepState.SUCCESS
                self.post_progress(step['name'])
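
The helper get_temp_log_file used above is not shown in this snippet; a minimal sketch of what such a helper might look like (its name and behaviour are assumptions based on how it is called):

import tempfile

def get_temp_log_file(step_name):
    # Return a writable local path for the provider log of the given step
    # (hypothetical implementation).
    f = tempfile.NamedTemporaryFile(prefix=f"{step_name}_", suffix=".log", delete=False)
    f.close()
    return f.name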
Example #11
async def main(args):

    with open(".image.hash") as f:
        hash = f.read()
    package = await vm.repo(
        image_hash=hash,
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        tasks_count = 0
        async for task in tasks:
            frames = [['0', '1', '2'], ['3', '4']][task.data]
            framesStr = ' '.join(frames)
            ctx.run(
                "/bin/bash", "-c",
                f"cd {GOLEM_WORKDIR} && /root/plot2.py {framesStr} > {GOLEM_WORKDIR}log.txt 2>&1"
            )
            for frame in frames:
                ctx.download_file(f"{GOLEM_WORKDIR}log.txt",
                                  f"log{task.data}.txt")
                ctx.download_file(f"{GOLEM_WORKDIR}discontinuous{frame}.png",
                                  f"discontinuous{frame}.png")
            yield ctx.commit()
            task.accept_task()
            tasks_count += 1
        # if tasks_count < 2:
        #     raise RuntimeError("Need at least 2 tasks!")

    async with Engine(
            package=package,
            max_workers=args.number_of_providers,
            budget=10.0,
            # timeout should be keyspace / number of providers dependent
            timeout=timedelta(minutes=25),
            subnet_tag=args.subnet_tag,
            event_emitter=log_summary(log_event_repr),
    ) as engine:

        inputs = range(2)

        async for task in engine.map(
                worker, [Task(data=graphInput) for graphInput in inputs]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
Example #12
async def main():
    package = await vm.repo(
        image_hash="c0317d4db8930afde1862f27973ee2f5b766c4d50a87409406e2e23f",
        min_mem_gib=2,
        min_storage_gib=2.5,
    )

    async def worker_train_model(ctx: WorkContext, tasks):
        async for task in tasks:
            global_round = task.data['global_round']
            node_id = task.data['node_id']
            model_path = os.path.join(ROUND_WEIGHTS_FOLDER,
                                      f'round_{global_round - 1}.h5')
            ctx.send_file(model_path,
                          f"/golem/work/model_{global_round - 1}.h5")
            specs = {
                'start': task.data['start'],
                'end': task.data['end'],
                'batch_size': BATCH_SIZE,
                'model_path': f'model_{global_round - 1}.h5',
                'epochs': PROVIDER_EPOCHS,
                'global_round': task.data['global_round'],
                'node_number': task.data['node_id']
            }
            ctx.send_json(
                "/golem/work/specs.json",
                specs,
            )
            ctx.send_file('client.py', "/golem/work/client.py")
            ctx.run("/bin/sh", "-c", "python3 client.py")
            node_model_output = f'/golem/output/model_round_{global_round}_{node_id}.h5'
            node_log_file = f'/golem/output/log_round_{global_round}_{node_id}.json'
            ctx.download_file(
                node_model_output,
                os.path.join(WORKER_MODEL_WEIGHTS_FOLDER,
                             f'round_{global_round}_worker_{node_id}.h5'))
            ctx.download_file(
                node_log_file,
                os.path.join(
                    WORKER_LOGS_FOLDER,
                    f'log_round_{global_round}_worker_{node_id}.json'))
            yield ctx.commit(timeout=timedelta(minutes=7))
            task.accept_result()

    print(f"{TEXT_COLOR_GREEN}"
          f"Initialising your model."
          f"{TEXT_COLOR_DEFAULT}")
    model = get_compiled_model()
    print(f"{TEXT_COLOR_GREEN}" f"Loading the data" f"{TEXT_COLOR_DEFAULT}")
    training_dataset, testing_dataset, train_length, test_length = load_dataset(
        BATCH_SIZE)
    print(f"{TEXT_COLOR_GREEN}"
          f"Initial model evaluation - "
          f"{TEXT_COLOR_DEFAULT}")
    eval_results = model.evaluate(testing_dataset)
    print(f"{TEXT_COLOR_BLUE}"
          f"ROUND 0 | Loss: {eval_results[0]} | Accuracy: {eval_results[1]}"
          f"{TEXT_COLOR_DEFAULT}")
    print(f"{TEXT_COLOR_MAGENTA}"
          f"Saving Model Weights for round 0"
          f"{TEXT_COLOR_DEFAULT}")
    model.save(os.path.join(ROUND_WEIGHTS_FOLDER, 'round_0.h5'))

    for global_round_number in range(1, GLOBAL_TRAINING_ROUNDS + 1):
        print(f"{TEXT_COLOR_GREEN}"
              f"Beginning Training Round {global_round_number}"
              f"{TEXT_COLOR_DEFAULT}")
        async with Executor(
                package=package,
                max_workers=NUM_PROVIDERS,
                budget=20.0,
                timeout=timedelta(minutes=29),
                subnet_tag=SUBNET_TAG,
                event_consumer=log_summary(log_event_repr),
        ) as executor:

            # No problem if we miss a few samples
            training_subset_steps = int(train_length / NUM_PROVIDERS)
            executor_tasks = [
                Task(
                    data={
                        'start': x,
                        'end': x + training_subset_steps,
                        'global_round': global_round_number,
                        'node_id': index + 1
                    }) for index, x in enumerate(
                        list(range(0, train_length, training_subset_steps)))
            ]
            async for task in executor.submit(worker_train_model,
                                              executor_tasks):
                print(
                    f"{TEXT_COLOR_CYAN}"
                    f"Training round {global_round_number} completed on provider node {task.data['node_id']}"
                    f"{TEXT_COLOR_DEFAULT}")

        all_worker_weights = get_client_model_weights(
            WORKER_MODEL_WEIGHTS_FOLDER, global_round_number)
        averaged_weights = federated_avg_weights(all_worker_weights)
        model.set_weights(averaged_weights)

        print(f"{TEXT_COLOR_GREEN}"
              f"TRAINING ROUND {global_round_number} complete!"
              f"{TEXT_COLOR_DEFAULT}")
        eval_results = model.evaluate(testing_dataset)
        print(
            f"{TEXT_COLOR_BLUE}"
            f"ROUND {global_round_number} | Loss: {eval_results[0]} | Accuracy: {eval_results[1]}"
            f"{TEXT_COLOR_DEFAULT}")
        print(f"{TEXT_COLOR_MAGENTA}"
              f"Saving Model Weights for round {global_round_number}"
              f"{TEXT_COLOR_DEFAULT}")
        model.save(
            os.path.join(ROUND_WEIGHTS_FOLDER,
                         f'round_{global_round_number}.h5'))
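
The functions get_compiled_model, load_dataset, get_client_model_weights and federated_avg_weights are defined elsewhere in this example. As an illustration of the averaging step, a minimal sketch of federated_avg_weights, assuming each worker's weights are a list of per-layer numpy arrays as returned by keras Model.get_weights():

import numpy as np

def federated_avg_weights(all_worker_weights):
    # Plain FedAvg with equal weighting: average each layer's weights
    # element-wise across all workers.
    return [np.mean(layer_stack, axis=0) for layer_stack in zip(*all_worker_weights)]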
Example #13
async def main(subnet_tag: str):
    package = await vm.repo(
        image_hash="83b5ebab52f39e676173de32f56cf2648c136050b8fa1f31a791c467",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        ctx.send_file(
            "./lens.py",
            "/golem/work/lens.py",
        )
        async for task in tasks:
            feed = task.data
            ctx.send_json(
                "/golem/work/params.json",
                feed,
            )
            commands = (
                "python3 /golem/work/lens.py >> /golem/output/task-log 2>&1;")
            ctx.run("/bin/sh", "-c", commands)
            frame_start = feed["start_frame"]
            frame_end = feed["start_frame"] + len(feed["points"])
            frames = range(frame_start, frame_end)
            ctx.log(f"Downloading frames {frame_start}-{frame_end}...")
            for frame in frames:
                ctx.download_file(f"/golem/output/{frame}.png",
                                  f"out/{frame + 100}.png")
            output = f"task-log"
            ctx.download_file(f"/golem/output/task-log", f"out/{output}")
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason = 'invalid file')
            task.accept_task(result=output)

        ctx.log("no more frames to render")

    points = np.arange(0.001, 1.0, 0.005)
    feeds = []
    for i in range(len(points) // 10):
        feed = {
            "start_frame": 10 * i,
            "points": [points[i] for i in range(10 * i, 10 * (i + 1))]
        }
        feeds.append(feed)

    # By passing `event_consumer=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
            package=package,
            max_workers=8,
            budget=100.0,
            timeout=timedelta(minutes=3) + timedelta(minutes=10),
            subnet_tag=subnet_tag,
            event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(worker,
                                     [Task(data=feed) for feed in feeds]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
Example #14
async def main(subnet_tag: str):
    package = await vm.repo(
        image_hash="9a3b5d67b0b27746283cb5f287c13eab1beaa12d92a9f536b747c7ae",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        scene_path = str(script_dir / "cubes.blend")
        ctx.send_file(scene_path, "/golem/resource/scene.blend")
        async for task in tasks:
            frame = task.data
            crops = [{
                "outfilebasename": "out",
                "borders_x": [0.0, 1.0],
                "borders_y": [0.0, 1.0]
            }]
            ctx.send_json(
                "/golem/work/params.json",
                {
                    "scene_file": "/golem/resource/scene.blend",
                    "resolution": (400, 300),
                    "use_compositing": False,
                    "crops": crops,
                    "samples": 100,
                    "frames": [frame],
                    "output_format": "PNG",
                    "RESOURCES_DIR": "/golem/resources",
                    "WORK_DIR": "/golem/work",
                    "OUTPUT_DIR": "/golem/output",
                },
            )
            ctx.run("/golem/entrypoints/run-blender.sh")
            output_file = f"output_{frame}.png"
            ctx.download_file(f"/golem/output/out{frame:04d}.png", output_file)
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason = 'invalid file')
            task.accept_task(result=output_file)

        ctx.log("no more frames to render")

    # iterator over the frame indices that we want to render
    frames: range = range(0, 60, 10)
    # TODO make this dynamic, e.g. depending on the size of files to transfer
    # worst-case time overhead for initialization, e.g. negotiation, file transfer etc.
    init_overhead: timedelta = timedelta(minutes=3)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=3,
        budget=10.0,
        timeout=init_overhead + timedelta(minutes=len(frames) * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(worker,
                                     [Task(data=frame) for frame in frames]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
Example #15
        async def inner(*args, **kwargs):
            # First, serialize the function's compiled code object to a file
            tmpdir = tempfile.TemporaryDirectory()
            module_path = PurePath(f"{tmpdir.name}/gfaas_module")
            with open(module_path, "wb") as f:
                marshal.dump(func.__code__, f)
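            # Note: marshal's serialization format is specific to the Python version,
            # so the provider image must run a compatible interpreter.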

            if self.run_local:
                import asyncio

                fut = self.engine.submit(_local_remote_fn, module_path, *args)
                res = await asyncio.wait_for(asyncio.wrap_future(fut),
                                             self.timeout.seconds)
                return res

            else:
                from yapapi.runner import Engine, Task, vm
                from yapapi.runner.ctx import WorkContext
                from yapapi.log import enable_default_logger, log_summary

                # Save input args to files
                saved_args = []
                for i, arg in enumerate(args):
                    arg_path = PurePath(f"{tmpdir.name}/arg{i}")
                    with open(arg_path, "w") as f:
                        json.dump(arg, f)
                    saved_args.append(arg_path)

                enable_default_logger()
                package = await vm.repo(
                    image_hash="74e9cdb5a5aa2c73a54f9ebf109986801fe2d4f026ea7d9fbfcca221",
                    min_mem_gib=0.5,
                    min_storage_gib=2.0,
                )
                out_path = PurePath(f"{tmpdir.name}/out")

                async def worker(ctx: WorkContext, tasks):
                    async for task in tasks:
                        ctx.send_file(module_path, "/golem/input/func")
                        remote_args = []

                        for (i, arg_path) in enumerate(saved_args):
                            remote_arg = f"/golem/input/arg{i}"
                            ctx.send_file(arg_path, remote_arg)
                            remote_args.append(remote_arg)

                        ctx.run("python", "/golem/runner.py",
                                "/golem/input/func", *remote_args)
                        ctx.download_file("/golem/output/out", out_path)
                        yield ctx.commit()
                        task.accept_task(result=out_path)

                    ctx.log("done")

                init_overhead: timedelta = timedelta(minutes=3)

                async with Engine(
                        package=package,
                        max_workers=1,
                        budget=self.budget,
                        timeout=init_overhead + self.timeout,
                        subnet_tag=self.subnet,
                        event_emitter=log_summary(),
                ) as engine:
                    async for progress in engine.map(worker,
                                                     [Task(data=None)]):
                        print(f"progress={progress}")

                with open(out_path, "r") as f:
                    out = json.load(f)

                return out
Example #16
async def main(subnet_tag: str):
    package = await vm.repo(
        image_hash="9a3b5d67b0b27746283cb5f287c13eab1beaa12d92a9f536b747c7ae",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker(ctx: WorkContext, tasks):
        scene_path = str(script_dir / "cubes.blend")
        ctx.send_file(scene_path, "/golem/resource/scene.blend")
        async for task in tasks:
            frame = task.data
            crops = [{
                "outfilebasename": "out",
                "borders_x": [0.0, 1.0],
                "borders_y": [0.0, 1.0]
            }]
            ctx.send_json(
                "/golem/work/params.json",
                {
                    "scene_file": "/golem/resource/scene.blend",
                    "resolution": (400, 300),
                    "use_compositing": False,
                    "crops": crops,
                    "samples": 100,
                    "frames": [frame],
                    "output_format": "PNG",
                    "RESOURCES_DIR": "/golem/resources",
                    "WORK_DIR": "/golem/work",
                    "OUTPUT_DIR": "/golem/output",
                },
            )
            ctx.run("/golem/entrypoints/run-blender.sh")
            output_file = f"output_{frame}.png"
            ctx.download_file(f"/golem/output/out{frame:04d}.png", output_file)
            try:
                # Set timeout for executing the script on the provider. Two minutes is plenty
                # of time for computing a single frame, for other tasks it may be not enough.
                # If the timeout is exceeded, this worker instance will be shut down and all
                # remaining tasks, including the current one, will be computed by other providers.
                yield ctx.commit(timeout=timedelta(seconds=120))
                # TODO: Check if job results are valid
                # and reject by: task.reject_task(reason = 'invalid file')
                task.accept_result(result=output_file)
            except BatchTimeoutError:
                print(f"{utils.TEXT_COLOR_RED}"
                      f"Task timed out: {task}, time: {task.running_time}"
                      f"{utils.TEXT_COLOR_DEFAULT}")
                raise

    # Iterator over the frame indices that we want to render
    frames: range = range(0, 60, 10)
    # Worst-case overhead, in minutes, for initialization (negotiation, file transfer etc.)
    # TODO: make this dynamic, e.g. depending on the size of files to transfer
    init_overhead = 3
    # Providers will not accept work if the timeout is outside of the [5 min, 30min] range.
    # We increase the lower bound to 6 min to account for the time needed for our demand to
    # reach the providers.
    min_timeout, max_timeout = 6, 30

    timeout = timedelta(
        minutes=max(min(init_overhead +
                        len(frames) * 2, max_timeout), min_timeout))

    # By passing `event_consumer=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Executor(
        package=package,
        max_workers=3,
        budget=10.0,
        timeout=timeout,
        subnet_tag=subnet_tag,
        event_consumer=log_summary(log_event_repr),
    ) as executor:

        async for task in executor.submit(
                worker, [Task(data=frame) for frame in frames]):
            print(
                f"{utils.TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.result}, time: {task.running_time}"
                f"{utils.TEXT_COLOR_DEFAULT}")
Example #17
    def _default_event_consumer() -> Callable[[events.Event], None]:
        from yapapi.log import log_event_repr, log_summary

        return log_summary(log_event_repr)
Example #18
async def main(subnet_tag, driver=None, network=None):
    package = await vm.repo(
        image_hash="2c5d9a80847eb147261c4e33df6e6955666ddd932ec40fd8b005f799",
        min_mem_gib=1,
        min_storage_gib=10.0,
    )

    async def worker(ctx: WorkContext, tasks):
        script_dir = pathlib.Path(__file__).resolve().parent
        async for task in tasks:
            frame = task.data
            ctx.run("/bin/sh", "-c", "./generate_data.sh")
            output_file = f"output/{frame}.zip"
            ctx.download_file(f"/golem/output/data.zip", output_file)
            try:
                # Set a timeout for executing the script on the provider (here 1750 seconds).
                # If the timeout is exceeded, this worker instance will be shut down and all
                # remaining tasks, including the current one, will be computed by other providers.
                yield ctx.commit(timeout=timedelta(seconds=1750))
                # TODO: Check if job results are valid
                # and reject by: task.reject_task(reason = 'invalid file')
                task.accept_result(result=output_file)
            except BatchTimeoutError:
                print(
                    f"{TEXT_COLOR_RED}"
                    f"Task {task} timed out on {ctx.provider_name}, time: {task.running_time}"
                    f"{TEXT_COLOR_DEFAULT}")
                raise

    # Iterator over the task indices (one data-generation task per index)
    frames: range = range(0, 60)
    # Worst-case overhead, in minutes, for initialization (negotiation, file transfer etc.)
    # TODO: make this dynamic, e.g. depending on the size of files to transfer
    init_overhead = 3
    # Providers will not accept work if the timeout is outside of the [5 min, 30min] range.
    # We increase the lower bound to 6 min to account for the time needed for our demand to
    # reach the providers.
    min_timeout, max_timeout = 6, 30

    timeout = timedelta(
        minutes=max(min(init_overhead +
                        len(frames) * 2, max_timeout), min_timeout))

    # By passing `event_consumer=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Executor(
        package=package,
        max_workers=30,
        budget=10.0,
        timeout=timeout,
        subnet_tag=subnet_tag,
        driver=driver,
        network=network,
        event_consumer=log_summary(log_event_repr),
    ) as executor:

        sys.stderr.write(
            f"yapapi version: {TEXT_COLOR_YELLOW}{yapapi_version}{TEXT_COLOR_DEFAULT}\n"
            f"Using subnet: {TEXT_COLOR_YELLOW}{subnet_tag}{TEXT_COLOR_DEFAULT}, "
            f"payment driver: {TEXT_COLOR_YELLOW}{executor.driver}{TEXT_COLOR_DEFAULT}, "
            f"and network: {TEXT_COLOR_YELLOW}{executor.network}{TEXT_COLOR_DEFAULT}\n"
        )

        num_tasks = 0
        start_time = datetime.now()

        async for task in executor.submit(
                worker, [Task(data=frame) for frame in frames]):
            num_tasks += 1
            print(
                f"{TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.result}, time: {task.running_time}"
                f"{TEXT_COLOR_DEFAULT}")

        print(
            f"{TEXT_COLOR_CYAN}"
            f"{num_tasks} tasks computed, total time: {datetime.now() - start_time}"
            f"{TEXT_COLOR_DEFAULT}")
Example #19
async def main(args):
    package = await vm.repo(
        image_hash="2c17589f1651baff9b82aa431850e296455777be265c2c5446c902e9",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    async def worker_check_keyspace(ctx: WorkContext, tasks):
        async for task in tasks:
            keyspace_sh_filename = "keyspace.sh"
            ctx.send_file(keyspace_sh_filename, "/golem/work/keyspace.sh")
            ctx.run("/bin/sh", "/golem/work/keyspace.sh")
            output_file = "keyspace.txt"
            ctx.download_file("/golem/work/keyspace.txt", output_file)
            yield ctx.commit()
            task.accept_result()

    async def worker_find_password(ctx: WorkContext, tasks):
        ctx.send_file("in.hash", "/golem/work/in.hash")

        async for task in tasks:
            skip = task.data
            limit = skip + step
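            # Note: step is assigned later in the main flow, before any
            # worker_find_password task is submitted, so this closure reads its final value.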

            # Commands to be run on the provider
            commands = (
                "rm -f /golem/work/*.potfile ~/.hashcat/hashcat.potfile; "
                f"touch /golem/work/hashcat_{skip}.potfile; "
                f"hashcat -a 3 -m 400 /golem/work/in.hash {args.mask} --skip={skip} --limit={limit} --self-test-disable -o /golem/work/hashcat_{skip}.potfile || true"
            )
            ctx.run(f"/bin/sh", "-c", commands)

            output_file = f"hashcat_{skip}.potfile"
            ctx.download_file(f"/golem/work/hashcat_{skip}.potfile",
                              output_file)
            yield ctx.commit()
            task.accept_result(result=output_file)

    # beginning of the main flow

    write_hash(args.hash)
    write_keyspace_check_script(args.mask)

    # By passing `event_consumer=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Executor(
        package=package,
        max_workers=args.number_of_providers,
        budget=10.0,
        # timeout should be keyspace / number of providers dependent
        timeout=timedelta(minutes=25),
        subnet_tag=args.subnet_tag,
        event_consumer=log_summary(log_event_repr),
    ) as executor:

        keyspace_computed = False
        # This is not a typical use of executor.submit as there is only one task, with no data:
        async for _task in executor.submit(worker_check_keyspace,
                                           [Task(data=None)]):
            keyspace_computed = True

        if not keyspace_computed:
            # Assume the errors have been already reported and we may return quietly.
            return

        keyspace = read_keyspace()

        print(
            f"{utils.TEXT_COLOR_CYAN}"
            f"Task computed: keyspace size count. The keyspace size is {keyspace}"
            f"{utils.TEXT_COLOR_DEFAULT}")

        step = int(keyspace / args.number_of_providers) + 1

        ranges = range(0, keyspace, step)

        async for task in executor.submit(
                worker_find_password, [Task(data=range) for range in ranges]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.result}"
                  f"{utils.TEXT_COLOR_DEFAULT}")

        password = read_password(ranges)

        if password is None:
            print(
                f"{utils.TEXT_COLOR_RED}No password found{utils.TEXT_COLOR_DEFAULT}"
            )
        else:
            print(f"{utils.TEXT_COLOR_GREEN}"
                  f"Password found: {password}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
Example #20
async def main(subnet_tag: str, presets: str, input_file: str, job: str):
    package = await vm.repo(
        image_hash="9a6b2409ccd96b3352b3fcdc6e1e568bdfa4d4e258589cfaff084786",
        min_mem_gib=0.5,
        min_storage_gib=2.0,
    )

    preset_list = list(filter(str.strip, presets.split(",")))
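    # Each comma-separated preset name becomes its own transcoding task (one HandBrake run per preset)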

    async def worker(ctx: WorkContext, tasks):
        sent_file = "/golem/resource/input.file"
        ctx.send_file(input_file, sent_file)

        async for task in tasks:
            preset = task.data

            if "MKV" in preset or preset == "Roku 2160p60 4K HEVC Surround":
                output_ext = ".mkv"
            else:
                output_ext = ".mp4"

            subjob = f"{job}_{preset_list.index(preset)+1}"

            update_status(subjob, "starting")

            output_file = f"{subjob}{output_ext}"

            ctx.log(f"job: {subjob}")

            update_status(subjob, "job sent")

            commands = (
                'cd /golem/output; '
                f'echo preset:{preset} > log.txt; '
                f'echo output_file:{output_file} >> log.txt; '
                f"HandBrakeCLI -i {sent_file} -o {output_file} --preset '{preset}' >> log.txt 2>&1; "
                'ls -lah >> log.txt')

            ctx.run("/bin/sh", "-c", commands)

            #ctx.download_file(f"/golem/output/log.txt", "log.txt")
            ctx.download_file(f"/golem/output/{output_file}",
                              f"./downloads/{output_file}")

            yield ctx.commit()

            update_status(subjob, "done")

            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason = 'invalid file')
            task.accept_task(result=output_file)

        ctx.log("no more videos to convert!")

    init_overhead: timedelta = timedelta(minutes=7)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=10,
        budget=20.0,
        timeout=init_overhead,
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(
                worker, [Task(data=preset) for preset in preset_list]):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")
Example #21
async def main(subnet_tag: str, node_count: int, timeout_seconds: int,
               password: str):
    print(
        f"Stats: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}"
    )

    if node_count < 2:
        raise Exception("Invalid node_count. There must be 2 or more nodes.")
    else:
        print("Running more than 2 nodes")

    package = await vm.repo(
        image_hash="a6722779ca397520db82f354684a46a13c473aca04e38b91ec32755e",
        min_mem_gib=4,
        min_storage_gib=3.0,
    )

    async def ripper(ctx: WorkContext, tasks):
        async for task in tasks:
            output_file = f"out_{str(task.data['node'])}.txt"
            # ctx.send_file('ripper.sh', '/golem/entrypoints/ripper.sh')  # uncomment to send your own modified script
            ctx.run('/bin/sh', '/golem/entrypoints/ripper.sh',
                    str(task.data['node']), str(task.data['nodes']),
                    str(timeout_seconds), password)
            ctx.log("task data is:")
            ctx.log(task.data)
            ctx.download_file("/golem/output/result.txt", output_file)
            yield ctx.commit()
            task.accept_task(result=output_file)

        ctx.log("hmm.. it looks like we got some passwords")

    # one task per node, carrying its index and the total node count
    nodes = [
        Task(data={
            'node': i + 1,
            'nodes': node_count
        }) for i in range(node_count)
    ]

    init_overhead: timedelta = timedelta(minutes=10)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=node_count,
        budget=30.0,
        timeout=init_overhead + timedelta(minutes=node_count * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(ripper, nodes):
            print(f"{utils.TEXT_COLOR_CYAN}"
                  f"Task computed: {task}, result: {task.output}"
                  f"{utils.TEXT_COLOR_DEFAULT}")

    # Process done, so remind the user of the parameters and show the results
    print(
        f"Parameters: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}"
    )
    for i in range(node_count):
        output_file = f"out_{str(i+1)}.txt"
        with open(output_file) as f:
            lines = f.readlines()

        echoer_message = f"Ripper {i+1} did not find the password"
        for line in lines:
            if "?" in line:
                echoer_message = f"{utils.TEXT_COLOR_GREEN}Worker {i+1} found the password: {line[2:].strip()}{utils.TEXT_COLOR_DEFAULT}"
                break

        print(echoer_message)
Example #22
async def main(subnet_tag: str, node_count: int, timeout_seconds: int, password: str):
    print(f"Parameters: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}")

    if node_count < 2:
        raise Exception("Invalid node_count. There must be 2 or more nodes.")

    package = await vm.repo(
        image_hash="c6c34d6462daa7307ace83c08028461411a0e0133d4db053904a89df",
        min_mem_gib=4,
        min_storage_gib=3.0,
    )

    async def worker(ctx: WorkContext, tasks):
        async for task in tasks:
            output_file = f"out_{str(task.data['node'])}.txt"
            ctx.run("/golem/entrypoints/crack.sh", str(task.data['node']), str(task.data['nodes']), str(timeout_seconds), password)
            ctx.log("task data is:")
            ctx.log(task.data)
            ctx.download_file("/golem/output/result.txt", output_file)
            yield ctx.commit()
            # TODO: Check if job results are valid
            # and reject by: task.reject_task(reason = 'invalid file')
            task.accept_task(result=output_file)

        ctx.log("no more frames to render")

    # one task per node, carrying its index and the total node count
    nodes = [Task(data={'node': i+1, 'nodes': node_count}) for i in range(node_count)]

    init_overhead: timedelta = timedelta(minutes=10)

    # By passing `event_emitter=log_summary()` we enable summary logging.
    # See the documentation of the `yapapi.log` module on how to set
    # the level of detail and format of the logged information.
    async with Engine(
        package=package,
        max_workers=node_count,
        budget=20.0,
        timeout=init_overhead + timedelta(minutes=node_count * 2),
        subnet_tag=subnet_tag,
        event_emitter=log_summary(log_event_repr),
    ) as engine:

        async for task in engine.map(worker, nodes):
            print(
                f"{utils.TEXT_COLOR_CYAN}"
                f"Task computed: {task}, result: {task.output}"
                f"{utils.TEXT_COLOR_DEFAULT}"
            )
        
    # Processing is done, so remind the user of the parameters and show the results
    print(f"Parameters: Nodes = {node_count}, Timeout = {timeout_seconds}s, Password = {password}")
    for i in range(node_count):
        output_file = f"out_{str(i+1)}.txt"
        with open(output_file) as f:
            lines = f.readlines()

        found_message = f"Worker {i+1} did not find the password"
        for line in lines:
            if "?" in line:
                found_message = f"{utils.TEXT_COLOR_YELLOW}Worker {i+1} found the password: {line[2:].strip()}{utils.TEXT_COLOR_DEFAULT}"
                break

        print(found_message)