Example #1
        def range(start, end, initial_step, cache_type=DEFAULT_CACHE_TYPE, context=None):
            # Capture the caller's stack trace (one frame up) so the batchsize
            # cache can be keyed by call site.
            context = context or tst.get_simple_traceback(1)

            def execute_range(func):
                return explicit.range(func, start, end, initial_step, toma_cache_type=cache_type, toma_context=context)

            return execute_range
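
The factory above only captures the caller's traceback and hands the decorated function straight to explicit.range, so the call into explicit.range happens at decoration time. The self-contained sketch below illustrates that control flow with a stand-in for explicit.range; the stand-in's behaviour (calling the function once per fixed-size sub-range) is an assumption for illustration only, since the real implementation also adapts the step size and uses the cache and context arguments. The factory is renamed range_ here purely to avoid shadowing the builtin in a standalone script.

def _fake_explicit_range(func, start, end, initial_step):
    # Stand-in for explicit.range: call func once per sub-range of [start, end).
    # The real implementation is assumed to shrink the step after out-of-memory
    # errors and to consult the batchsize cache; none of that is modelled here.
    current = start
    while current < end:
        func(current, min(current + initial_step, end))
        current += initial_step

def range_(start, end, initial_step):
    # Same shape as the snippet above: the factory returns a decorator that
    # immediately forwards the decorated function to the explicit version.
    def execute_range(func):
        return _fake_explicit_range(func, start, end, initial_step)
    return execute_range

@range_(0, 10, 4)
def show(start, end):
    print(f"sub-range [{start}, {end})")
# Decorating runs immediately and prints [0, 4), [4, 8), [8, 10).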
Example #2
        def batch(initial_batchsize, cache_type=DEFAULT_CACHE_TYPE, context=None):
            context = context or tst.get_simple_traceback(1)

            def execute_batch(func):
                return explicit.batch(func, initial_batchsize, toma_cache_type=cache_type, toma_context=context)

            return execute_batch
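
The batch factory is identical in shape; only the forwarded arguments differ. A usage sketch follows, with the attribute path toma.execute.batch and the callback signature func(batchsize) stated as assumptions: the function is expected to run at decoration time with the current batch size, and to be retried with a smaller one after CUDA out-of-memory errors.

import toma  # attribute path toma.execute.batch is an assumption

# Assumed callback signature: the current (possibly reduced) batch size is
# passed as the first argument when the decorator runs the function.
@toma.execute.batch(initial_batchsize=512)
def evaluate(batchsize):
    print(f"evaluating with batchsize={batchsize}")
    # ... run the actual batched computation here ...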
Example #3
    def get_batchsize(self, initial_batchsize: int):
        # Key the cache entry by the call site two frames up the stack.
        stacktrace = tst.get_simple_traceback(2)

        # Bucket the available memory by MEMORY_GRANULARITY so that nearly
        # identical memory situations map to the same cache entry; -1 means
        # RAM is not tracked at all.
        if self.TRACK_RAM:
            cpu_available_memory = int(
                toma.cpu_memory.get_available_cpu_memory() //
                self.MEMORY_GRANULARITY)
        else:
            cpu_available_memory = -1

        gpu_available_memory = int(tcm.get_cuda_assumed_available_memory() //
                                   self.MEMORY_GRANULARITY)

        # Fetch the cached batchsize for this call site and memory situation
        # and seed it with the requested initial value.
        batchsize = self.get_batchsize_from_cache(stacktrace,
                                                  cpu_available_memory,
                                                  gpu_available_memory)
        batchsize.set_initial_batchsize(initial_batchsize)
        return batchsize
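
The two floor divisions above only bucket the available memory so that nearly identical memory situations hit the same cache entry. A quick illustration with a made-up granularity (the real MEMORY_GRANULARITY value is not shown in the snippet):

MEMORY_GRANULARITY = 2 ** 26  # hypothetical value: 64 MiB buckets

def bucket(available_bytes: int) -> int:
    # Same floor-division bucketing as in get_batchsize above.
    return int(available_bytes // MEMORY_GRANULARITY)

print(bucket(8_000_000_000))  # 119
print(bucket(8_050_000_000))  # 119 as well: treated as the same memory situation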
Example #4
        def chunked(
            tensor: torch.Tensor,
            initial_step: Optional[int] = None,
            dimension: Optional[int] = None,
            cache_type: Type = DEFAULT_CACHE_TYPE,
            context=None,
        ):
            context = context or tst.get_simple_traceback(1)

            def execute_chunked(func):
                return explicit.chunked(
                    func,
                    tensor,
                    initial_step,
                    toma_dimension=dimension,
                    toma_cache_type=cache_type,
                    toma_context=context,
                )

            return execute_chunked
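
A usage sketch for the chunked variant, under stated assumptions: the attribute path toma.execute.chunked, the callback signature func(chunk, start, end), and chunking along the leading dimension when dimension is not given are all assumptions not shown in the snippet. As with the other factories, explicit.chunked is invoked at decoration time.

import torch
import toma  # attribute path toma.execute.chunked is an assumption

data = torch.randn(100_000, 128)

# Assumed callback signature: each call receives a chunk view of `data`
# together with its start/end offsets; the chunk size starts at initial_step
# and is expected to shrink after out-of-memory errors.
@toma.execute.chunked(data, initial_step=16_384)
def normalize(chunk, start, end):
    # In-place work on the view writes back into `data`.
    chunk -= chunk.mean(dim=1, keepdim=True)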
Example #5
def get_stacktrace():
    return stacktrace.get_simple_traceback()
Example #6
    def __init__(self):
        # Register this cache under the call site that created it
        # (two frames up the stack).
        stacktrace = tst.get_simple_traceback(2)
        BatchsizeCache.all_instances[stacktrace] = self