async def worker(ctx: WorkContext, tasks):
        """Render one frame of the bundled Blender scene per incoming task.

        Intended as the `worker` argument to `Golem.execute_tasks`: the scene
        file is uploaded once when the worker starts, then each task (whose
        `data` is a frame number) renders a single frame and downloads the
        resulting PNG to `output_<frame>.png` locally.
        """
        script_dir = pathlib.Path(__file__).resolve().parent
        scene_path = str(script_dir / "cubes.blend")
        # Set timeout for the first script executed on the provider. Usually, 30 seconds
        # should be more than enough for computing a single frame of the provided scene,
        # however a provider may require more time for the first task if it needs to download
        # the VM image first. Once downloaded, the VM image will be cached and other tasks that use
        # that image will be computed faster.
        script = ctx.new_script(timeout=timedelta(minutes=10))
        script.upload_file(scene_path, "/golem/resource/scene.blend")

        async for task in tasks:
            frame = task.data
            crops = [{"outfilebasename": "out", "borders_x": [0.0, 1.0], "borders_y": [0.0, 1.0]}]
            # Render parameters consumed by the entrypoint script inside the VM.
            script.upload_json(
                {
                    "scene_file": "/golem/resource/scene.blend",
                    "resolution": (400, 300),
                    "use_compositing": False,
                    "crops": crops,
                    "samples": 100,
                    "frames": [frame],
                    "output_format": "PNG",
                    "RESOURCES_DIR": "/golem/resources",
                    "WORK_DIR": "/golem/work",
                    "OUTPUT_DIR": "/golem/output",
                },
                "/golem/work/params.json",
            )
            script.run("/golem/entrypoints/run-blender.sh")
            output_file = f"output_{frame}.png"
            script.download_file(f"/golem/output/out{frame:04d}.png", output_file)
            try:
                yield script
                # TODO: Check if job results are valid
                # and reject by: task.reject_task(reason = 'invalid file')
                task.accept_result(result=output_file)
            except BatchTimeoutError:
                print(
                    f"{TEXT_COLOR_RED}"
                    f"Task {task} timed out on {ctx.provider_name}, time: {task.running_time}"
                    f"{TEXT_COLOR_DEFAULT}"
                )
                raise

            # reinitialize the script which we send to the engine to compute subsequent frames
            script = ctx.new_script(timeout=timedelta(minutes=1))

            # NOTE(review): `show_usage` is not defined in this function —
            # presumably a flag from the enclosing scope; confirm against caller.
            if show_usage:
                raw_state = await ctx.get_raw_state()
                usage = format_usage(await ctx.get_usage())
                cost = await ctx.get_cost()
                print(
                    f"{TEXT_COLOR_MAGENTA}"
                    f" --- {ctx.provider_name} STATE: {raw_state}\n"
                    f" --- {ctx.provider_name} USAGE: {usage}\n"
                    f" --- {ctx.provider_name}  COST: {cost}"
                    f"{TEXT_COLOR_DEFAULT}"
                )
Example #2
0
async def perform_mask_attack(ctx: WorkContext, tasks: AsyncIterable[Task]):
    """Worker script which performs a hashcat mask attack against a target hash.

    This function is used as the `worker` parameter to `Golem#execute_tasks`.
    It represents a sequence of commands to be executed on a remote provider node.

    Each task's `data` is the starting offset (`--skip`) of a keyspace chunk;
    the chunk's potfile is downloaded to a temp file, its first line is passed
    to `task.accept_result`, and the temp file is always cleaned up afterwards.
    """
    async for task in tasks:
        skip = task.data
        limit = skip + args.chunk_size

        output_name = f"yacat_{skip}.potfile"
        worker_output_path = f"/golem/output/{output_name}"

        script = ctx.new_script(timeout=MASK_ATTACK_TIMEOUT)
        script.run("/bin/sh", "-c", _make_attack_command(skip, limit, worker_output_path))
        # Bind the local path before the try block so the `finally` clause can
        # never hit a NameError.
        output_file = Path(gettempdir()) / output_name
        try:
            script.download_file(worker_output_path, str(output_file))

            yield script

            with output_file.open() as f:
                result = f.readline()
                task.accept_result(result)
        finally:
            # The download may not have completed if the batch failed or timed
            # out; tolerate a missing file instead of masking the original
            # exception with a FileNotFoundError from unlink().
            output_file.unlink(missing_ok=True)
Example #3
0
    async def worker(ctx: WorkContext, tasks):
        """A worker function for `Golem.execute_tasks()`.

        The first call to this function will produce a worker
        that sends an invalid `run` command to the provider.
        This should cause `yield script` to fail with
        `CommandExecutionError`.

        The remaining calls will just send `sleep 5` to the
        provider to simulate some work.
        """

        nonlocal first_worker
        should_fail = first_worker
        first_worker = False

        async for task in tasks:

            script = ctx.new_script()

            if should_fail:
                # Send a command that will fail on the provider
                script.run("xyz")
                yield script
            else:
                # Simulate some work
                script.run("/bin/sleep", "5")
                yield script

            task.accept_result()
Example #4
0
async def worker(context: WorkContext, tasks: AsyncIterable[Task]):
    """For each task, run `date` on the provider and accept its result."""
    async for task in tasks:
        batch = context.new_script()
        # `run` returns an awaitable that resolves once the batch completes.
        date_result = batch.run("/bin/sh", "-c", "date")

        yield batch

        task.accept_result(result=await date_result)
Example #5
0
 async def worker(ctx: WorkContext, tasks):
     """Run the legacy entrypoint per task and download its output file."""
     async for task in tasks:
         ctx.run("/golem/entrypoints/runold.sh")
         result_path = "output.txt"
         ctx.download_file("/golem/output/output.txt", result_path)
         try:
             # Give the whole batch a generous 5-hour (18000 s) budget. If it
             # is exceeded, this worker instance is shut down and remaining
             # tasks, including the current one, go to other providers.
             yield ctx.commit(timeout=timedelta(seconds=18000))
             # TODO: validate the job results and reject invalid ones via
             # task.reject_task(reason = 'invalid file')
             task.accept_result(result=result_path)
         except BatchTimeoutError:
             print(
                 f"{utils.TEXT_COLOR_RED}"
                 f"Task timed out: {task}, time: {task.running_time}"
                 f"{utils.TEXT_COLOR_DEFAULT}"
             )
             raise
Example #6
0
 async def worker_check_keyspace(ctx: WorkContext, tasks):
     """Upload and run keyspace.sh on the provider, then fetch keyspace.txt."""
     async for task in tasks:
         local_script = "keyspace.sh"
         ctx.send_file(local_script, "/golem/work/keyspace.sh")
         ctx.run("/bin/sh", "/golem/work/keyspace.sh")
         result_file = "keyspace.txt"
         ctx.download_file("/golem/work/keyspace.txt", result_file)
         yield ctx.commit()
         task.accept_result()
Example #7
0
 async def worker(ctx: WorkContext, tasks):
     """Generate a data archive on the provider per task and download it.

     Each task's `data` is a frame identifier; the remote script produces
     `/golem/output/data.zip`, which is saved locally as `output/<frame>.zip`.
     """
     # (removed an unused `script_dir` local that was never referenced)
     async for task in tasks:
         frame = task.data
         ctx.run("/bin/sh", "-c", "./generate_data.sh")
         output_file = f"output/{frame}.zip"
         ctx.download_file("/golem/output/data.zip", output_file)
         try:
             # Allow ~29 minutes (1750 s) for the whole batch. If the timeout
             # is exceeded, this worker instance will be shut down and all
             # remaining tasks, including the current one, will be computed
             # by other providers.
             yield ctx.commit(timeout=timedelta(seconds=1750))
             # TODO: Check if job results are valid
             # and reject by: task.reject_task(reason = 'invalid file')
             task.accept_result(result=output_file)
         except BatchTimeoutError:
             print(
                 f"{TEXT_COLOR_RED}"
                 f"Task {task} timed out on {ctx.provider_name}, time: {task.running_time}"
                 f"{TEXT_COLOR_DEFAULT}")
             raise
Example #8
0
 async def worker(ctx: WorkContext, tasks):
     """Render one Blender frame per task using the legacy `ctx.commit` API.

     The scene is uploaded once per worker; each task (whose `data` is a frame
     number) then renders a single frame and downloads the resulting PNG.
     """
     # NOTE(review): `script_dir` is not defined in this function — presumably
     # a module-level or enclosing-scope path; confirm against the full file.
     scene_path = str(script_dir / "cubes.blend")
     ctx.send_file(scene_path, "/golem/resource/scene.blend")
     async for task in tasks:
         frame = task.data
         crops = [{
             "outfilebasename": "out",
             "borders_x": [0.0, 1.0],
             "borders_y": [0.0, 1.0]
         }]
         # Render parameters consumed by the entrypoint script inside the VM.
         ctx.send_json(
             "/golem/work/params.json",
             {
                 "scene_file": "/golem/resource/scene.blend",
                 "resolution": (400, 300),
                 "use_compositing": False,
                 "crops": crops,
                 "samples": 100,
                 "frames": [frame],
                 "output_format": "PNG",
                 "RESOURCES_DIR": "/golem/resources",
                 "WORK_DIR": "/golem/work",
                 "OUTPUT_DIR": "/golem/output",
             },
         )
         ctx.run("/golem/entrypoints/run-blender.sh")
         output_file = f"output_{frame}.png"
         ctx.download_file(f"/golem/output/out{frame:04d}.png", output_file)
         try:
             # Set timeout for executing the script on the provider. Two minutes is plenty
             # of time for computing a single frame, for other tasks it may be not enough.
             # If the timeout is exceeded, this worker instance will be shut down and all
             # remaining tasks, including the current one, will be computed by other providers.
             yield ctx.commit(timeout=timedelta(seconds=120))
             # TODO: Check if job results are valid
             # and reject by: task.reject_task(reason = 'invalid file')
             task.accept_result(result=output_file)
         except BatchTimeoutError:
             print(f"{utils.TEXT_COLOR_RED}"
                   f"Task timed out: {task}, time: {task.running_time}"
                   f"{utils.TEXT_COLOR_DEFAULT}")
             raise
    async def worker(ctx: WorkContext, tasks):
        """Execute one batch of executables per task on a provider node.

        For each task: remove any stale local output, upload the batch's
        executables and its generated shell driver, run the driver remotely,
        and download the output file back into the current directory.
        """
        cwd = pathlib.Path.cwd()
        async for task in tasks:
            batch = task.data
            # Best-effort removal of a stale output from a previous run; a
            # missing file is expected and not fatal. A bare `except:` here
            # would also swallow KeyboardInterrupt/SystemExit, so catch only
            # filesystem errors.
            try:
                os.remove(batch.local_out(cwd))
            except OSError:
                pass

            for exe in batch.all_executables():
                ctx.send_file(batch.local_exe(cwd, exe), batch.remote_exe(exe))
            batch.make_local_sh(cwd)
            ctx.send_file(batch.local_sh(cwd), batch.remote_sh())
            ctx.run("/bin/bash", batch.remote_sh())
            ctx.download_file(batch.remote_out(), batch.local_out(cwd))
            try:
                # Each batch supplies its own timeout budget.
                yield ctx.commit(timeout=timedelta(seconds=batch.timeout()))
                task.accept_result(result=batch.local_out(cwd))
            except BatchTimeoutError:
                print(
                    f"{text_colors.RED}"
                    f"Task timed out: {task.data.id}, time: {task.running_time}"
                    f"{text_colors.DEFAULT}")
                raise
Example #10
0
    async def worker(ctx: WorkContext, tasks):
        """Run a CPU/disk stress workload per task and download its log file."""
        async for task in tasks:
            # Unique local name so parallel/repeated runs never collide.
            result_name = f"output_{datetime.now()}_{random.random()}.txt"
            batch = ctx.new_script(timeout=timedelta(minutes=10))
            batch.run("/usr/bin/stress-ng", "--cpu", "1", "--timeout", "1")
            batch.run("/golem/task.sh", "-o", "1024", "-t", "5")
            batch.run("/golem/task.sh", "-f", "/golem/output/output.txt,1048576")
            batch.download_file("/golem/output/output.txt", result_name)
            batch.run("/golem/task.sh", "-e", "1024", "-t", "5")

            try:
                yield batch
                task.accept_result(result=result_name)
            except BatchTimeoutError:
                print(
                    f"{TEXT_COLOR_RED}"
                    f"Task {task} timed out on {ctx.provider_name}, time: {task.running_time}"
                    f"{TEXT_COLOR_DEFAULT}")
                raise
Example #11
0
    async def worker_find_password(ctx: WorkContext, tasks):
        """Search one chunk of the hashcat keyspace per task on a provider."""
        ctx.send_file("in.hash", "/golem/work/in.hash")

        async for task in tasks:
            skip = task.data
            limit = skip + step

            # Commands to be run on the provider
            commands = (
                "rm -f /golem/work/*.potfile ~/.hashcat/hashcat.potfile; "
                f"touch /golem/work/hashcat_{skip}.potfile; "
                f"hashcat -a 3 -m 400 /golem/work/in.hash {args.mask} --skip={skip} --limit={limit} --self-test-disable -o /golem/work/hashcat_{skip}.potfile || true"
            )
            ctx.run("/bin/sh", "-c", commands)

            potfile_name = f"hashcat_{skip}.potfile"
            ctx.download_file(f"/golem/work/hashcat_{skip}.potfile", potfile_name)
            yield ctx.commit()
            task.accept_result(result=potfile_name)
Example #12
0
async def compute_keyspace(context: WorkContext, tasks: AsyncIterable[Task]):
    """Worker script which computes the size of the keyspace for the mask attack.

    This function is used as the `worker` parameter to `Golem#execute_tasks`.
    It represents a sequence of commands to be executed on a remote provider node.

    The computed keyspace size (an int parsed from hashcat's stdout) is passed
    to `task.accept_result`. Raises `RuntimeError` if the remote command fails.
    """
    async for task in tasks:
        cmd = f"hashcat --keyspace " f"-a {HASHCAT_ATTACK_MODE} -m {args.hash_type} {args.mask}"
        s = context.new_script(timeout=KEYSPACE_TIMEOUT)
        s.run("/bin/bash", "-c", cmd)

        try:
            future_result = yield s

            # each item is the result of a single command on the provider (including setup commands)
            result: List[CommandExecuted] = await future_result
            # we take the last item since it's the last command that was executed on the provider
            cmd_result: CommandExecuted = result[-1]

            keyspace = int(cmd_result.stdout)
            task.accept_result(result=keyspace)
        except CommandExecutionError as e:
            # Chain the original error so hashcat's failure details survive
            # in the traceback instead of being flattened into a string.
            raise RuntimeError(f"Failed to compute attack keyspace: {e}") from e
Example #13
0
    async def worker(ctx: WorkContext, tasks):
        """Probe one provider's CPU model, then terminate the agreement.

        Each provider is visited at most once: its id is recorded in
        `scanned_nodes` (from the enclosing scope), the parsed `/proc/cpuinfo`
        model name (or None) is accepted as the task result, and the task
        generator is closed so the engine releases the agreement.
        """
        assert ctx.provider_id not in scanned_nodes

        async for task in tasks:
            print(
                f"{TEXT_COLOR_CYAN}"
                f"Getting info for {ctx.provider_id} (aka {ctx.provider_name})"
                f"{TEXT_COLOR_DEFAULT}",
            )
            script = ctx.new_script()

            future_result = script.run("/bin/cat", "/proc/cpuinfo")
            yield script

            # stdout may be None if the command produced no output.
            result = (await future_result).stdout or ""

            # Extract the first "model name : ..." line from cpuinfo output.
            cpu_model_match = re.search("^model name\\s+:\\s+(.*)$", result, flags=re.MULTILINE)
            if cpu_model_match:
                result = cpu_model_match.group(1)
            else:
                result = None

            # add the node to the set so we don't end up signing another agreement with it
            scanned_nodes.add(ctx.provider_id)

            # and accept the result (pass the result to the loop in `main`)
            task.accept_result((ctx.provider_id, ctx.provider_name, result))

            # as we don't really want the engine to execute any more tasks on this node,
            # we signal the parent generator to exit and through that
            # also request termination of the worker and the agreement
            #
            # issuing a `break` here instead will usually not do what the user is expecting,
            # as the parent generator would just exit cleanly without notifying the
            # engine and there's nothing stopping the engine from re-launching the activity/worker
            # on the same agreement
            await tasks.aclose()
 async def worker_train_model(ctx: WorkContext, tasks):
     """Run one federated-learning training round on a provider per task.

     Each task's `data` dict carries the round number, node id, data slice
     bounds ('start'/'end'). The previous round's model weights are uploaded,
     `client.py` is executed remotely, and the node's updated weights and
     training log are downloaded into the local weights/logs folders.
     """
     async for task in tasks:
         global_round = task.data['global_round']
         node_id = task.data['node_id']
         # Weights produced by the previous round serve as the starting model.
         model_path = os.path.join(ROUND_WEIGHTS_FOLDER,
                                   f'round_{global_round - 1}.h5')
         ctx.send_file(model_path,
                       f"/golem/work/model_{global_round - 1}.h5")
         # Training configuration consumed by client.py inside the VM.
         specs = {
             'start': task.data['start'],
             'end': task.data['end'],
             'batch_size': BATCH_SIZE,
             'model_path': f'model_{global_round - 1}.h5',
             'epochs': PROVIDER_EPOCHS,
             'global_round': task.data['global_round'],
             'node_number': task.data['node_id']
         }
         ctx.send_json(
             "/golem/work/specs.json",
             specs,
         )
         ctx.send_file('client.py', "/golem/work/client.py")
         ctx.run("/bin/sh", "-c", "python3 client.py")
         node_model_output = f'/golem/output/model_round_{global_round}_{node_id}.h5'
         node_log_file = f'/golem/output/log_round_{global_round}_{node_id}.json'
         ctx.download_file(
             node_model_output,
             os.path.join(WORKER_MODEL_WEIGHTS_FOLDER,
                          f'round_{global_round}_worker_{node_id}.h5'))
         ctx.download_file(
             node_log_file,
             os.path.join(
                 WORKER_LOGS_FOLDER,
                 f'log_round_{global_round}_worker_{node_id}.json'))
         yield ctx.commit(timeout=timedelta(minutes=7))
         task.accept_result()
Example #15
0
 async def worker(ctx: WorkContext, tasks):
     """Run one CI-style pipeline step inside a provider VM per task.

     Uploads the context archive, unpacks it, runs the step's commands while
     appending their output to a remote log, then downloads that log locally.
     """
     async for task in tasks:
         # NOTE(review): `step` and `self` are not defined in this function —
         # they presumably come from an enclosing scope, and `step` does not
         # change across tasks; verify against the full file.
         step_name = step['name']
         commands = step['commands']
         # prepair envs in string form of: "k1=v1 k2=v2 ... kn=vn "
         envs = step.get('environment')
         print(f"\033[36;1mSending the context zip file: {self.tar_fname}\033[0m")
         ctx.send_file(self.tar_fname , "/golem/resource/context.zip")
         # extracting tar file.
         print(f"\033[36;1mExtracting the zip file: {self.tar_fname}\033[0m")
         ctx.run("/bin/sh", "-c", "unzip /golem/resource/context.zip")
         # run all commands one by one
         for command in commands:
             print(f"\033[36;1mRunning {command}\033[0m")
             # set envs.
             ctx.run("/bin/sh", "-c", f"{command} >> /golem/output/cmd.log 2>&1", env=envs)
         # Collect the combined command log into a local temp file.
         log_fname = get_temp_log_file(step_name)
         ctx.download_file(f"/golem/output/cmd.log", log_fname)
         try:
             yield ctx.commit(timeout=timedelta(minutes=30))
             task.accept_result(result=log_fname)
         except BatchTimeoutError:
             print(f"Task timed out: {task}, time: {task.running_time}")
             raise
     ctx.log("no more task to run")