Example No. 1
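A simulation worker method, excerpted from its class. It assumes logging and time are imported at module level, that World, serializer, MammothAnalyzer, and WorkerArgs come from the surrounding project, and that the class defines save_world_each, max_cycle, and _save_world.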
    def worker(self, args: WorkerArgs):
        logging.info(f'World {args.world_name} has been started')
        if args.world_constants:
            world = World(args.world_constants)
        else:
            world = serializer.load(args.world_save)
        analyzer = MammothAnalyzer(world)

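        # Wall-clock bookkeeping: "performance" below means seconds of real
        # time spent per simulated world tick.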
        world_start_time = world.time
        start_time = prev_save_time = time.perf_counter()
        while True:
            if world.time % self.save_world_each == 0:
                now = time.perf_counter()
                total_elapsed = now - start_time
                total_performance = total_elapsed / ((world.time - world_start_time) or 1)
                elapsed = now - prev_save_time
                performance = elapsed / self.save_world_each
                logging.info(
                    f'{args.world_name}: '
                    f'{world.time} wtime, '
                    f'{elapsed:.3f}s elapsed, '
                    f'{performance:.6f} performance (s/tick), '
                    f'{total_elapsed:.3f}s total elapsed, '
                    f'{total_performance:.6f} total performance (s/tick).'
                )
                self._save_world(world, args.snapshot_dir)
                prev_save_time = now

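            # Advance the simulation one tick; stop once the analyzer detects
            # a killing reaction or the optional cycle cap is reached.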
            world.update()
            analyzer.update()
            if analyzer.amount_of_killings > 0.01:
                logging.info(f'World {args.world_name} got reaction at {world.time}')
                break

            if self.max_cycle and world.time >= self.max_cycle:
                break

        total_performance = (time.perf_counter() - start_time) / ((world.time - world_start_time) or 1)

        self._save_world(world, args.snapshot_dir)
        logging.info(f'World {args.world_name} has finished with average performance: {total_performance:.6f} s/tick')
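A minimal, hypothetical driver for the method above. The SimulationRunner name and the WorkerArgs fields are assumptions inferred from the attributes the snippet touches; WorldConstants is borrowed from Example No. 2.

# Hypothetical wiring: only the names used in the snippet above are taken
# from the source; SimulationRunner is an assumed host class for worker().
import logging
from dataclasses import dataclass

@dataclass
class WorkerArgs:
    world_name: str
    snapshot_dir: str
    world_constants: object = None   # e.g. a WorldConstants instance, or None
    world_save: str | None = None    # path to a serialized world when resuming

logging.basicConfig(level=logging.INFO)
runner = SimulationRunner(save_world_each=1_000, max_cycle=100_000)
runner.worker(WorkerArgs(world_name='demo',
                         snapshot_dir='snapshots/demo',
                         world_constants=WorldConstants()))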
Example No. 2
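A queue-consumer variant of the same loop: each delivery carries a JSON job describing a chunk of simulation work, and the ack is handed back to the connection's I/O thread via connection.add_callback_threadsafe, which matches pika's BlockingConnection API. The snippet assumes module-level json, os, time, and functools imports, the project's World, WorldConstants, and serializer, globals SIGTERM and SIGUSR1 maintained by signal handlers, and an ack_message helper; a wiring sketch follows the function.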
def do_work(connection, channel, delivery_tag, job):
    global SIGUSR1  # SIGTERM is only read here, so it needs no global declaration
    _LOGGER.info(
        f'Worker begin. Delivery tag: {delivery_tag}. Raw job: {job!r}')
    # Parse job
    job = json.loads(job)
    snapshot_dir = job['snapshot_dir']
    latest_tick = job.get('latest_tick')
    cycle_amount = job.get('cycle_amount', 1000)
    max_cycle = job.get('max_cycle', 10_000)
    world_constants_override = job.get('constants')

    # Load or Create world
    if latest_tick is not None:  # tick 0 names a real snapshot, so test presence explicitly
        save_path = os.path.join(snapshot_dir, f'{latest_tick}.wrld')
        _LOGGER.info(f'Loading from {save_path}')
        world = serializer.load(save_path)
    else:
        _LOGGER.info(f'Creating new world {snapshot_dir}')
        world_constants = WorldConstants()
        if world_constants_override:
            new_dict = {
                **world_constants.to_dict(False),
                **world_constants_override
            }
            world_constants = WorldConstants.from_dict(new_dict)
        world = World(world_constants)
        save_path = os.path.join(snapshot_dir, '0.wrld')
        serializer.save(world, save_path)

    world_start_time = world.time
    stop_world = False
    _LOGGER.info(
        f'World {save_path}: calculating {cycle_amount} cycles (max_cycle={max_cycle})'
    )
    start_time = time.perf_counter()  # monotonic clock for elapsed-time measurement
    # Calculate world
    for _ in range(cycle_amount):
        world.update()
        if world.time >= max_cycle or not world.animals:
            stop_world = True
            break

        if SIGTERM:
            _LOGGER.warning("SIGTERM received in worker. Finishing it.")
            break

        if SIGUSR1:
            _LOGGER.info(f"Current world {save_path} time is {world.time}")
            SIGUSR1 = False

    # Analyzing performance
    elapsed = time.perf_counter() - start_time
    performance = elapsed / ((world.time - world_start_time) or 1)
    _LOGGER.info(
        f'World: {save_path}, calculated: {world.time - world_start_time} ticks, '
        f'world.time: {world.time} ticks, elapsed: {elapsed:.3f}s, performance: {performance:.6f} s/tick'
    )
    # Saving world
    save_path = os.path.join(snapshot_dir, f'{world.time}.wrld')
    _LOGGER.info(f'Saving {save_path}')
    serializer.save(world, save_path)

    # Preparing new job
    if not stop_world:
        job['latest_tick'] = world.time
        new_job = json.dumps(job)
        scale_down = False
    else:
        _LOGGER.info(f'World {save_path} is finished')
        new_job = None
        scale_down = True

    _LOGGER.info(
        f'Worker done. Delivery tag: {delivery_tag}. new_message: {new_job}')
    cb = functools.partial(ack_message,
                           channel,
                           delivery_tag,
                           new_message=new_job,
                           scale_down=scale_down)
    connection.add_callback_threadsafe(cb)
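A sketch of the wiring the globals and the thread-safe callback imply, following pika's documented threaded-consumer pattern. The queue name, handler names, and connection parameters are illustrative assumptions.

# Illustrative consumer setup; everything except the SIGTERM/SIGUSR1 flags
# and the connection.add_callback_threadsafe usage above is an assumption.
import pika
import signal
import threading

SIGTERM = False
SIGUSR1 = False

def _on_sigterm(signum, frame):
    global SIGTERM
    SIGTERM = True              # do_work polls this flag between ticks

def _on_sigusr1(signum, frame):
    global SIGUSR1
    SIGUSR1 = True              # asks do_work to log the current world time

signal.signal(signal.SIGTERM, _on_sigterm)
signal.signal(signal.SIGUSR1, _on_sigusr1)

connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()

def on_message(ch, method, properties, body):
    # Run the long computation off the connection's I/O thread; do_work
    # hands the ack back via connection.add_callback_threadsafe when done.
    threading.Thread(
        target=do_work,
        args=(connection, ch, method.delivery_tag, body),
    ).start()

channel.basic_consume(queue='worlds', on_message_callback=on_message)
channel.start_consuming()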