Example 1
0
 def __enter__(self):
     """Begin an ITT profiling session.

     Returns self so the object works in a ``with`` statement, or None
     when profiling is disabled (making the context manager a no-op).
     Raises RuntimeError on nested entry — this manager is not reentrant.
     """
     if self.enabled:
         if self.entered:
             raise RuntimeError(
                 "ITT annotation context manager is not reentrant")
         self.entered = True
         profiler_config = ProfilerConfig(
             ProfilerState.ITT, self.record_shapes,
             False, False, False, False, _ExperimentalConfig())
         _enable_profiler(profiler_config, set())
         return self
Example 2
0
    def __init__(
            self,
            enabled=True,
            *,
            use_cuda=False,
            record_shapes=False,
            with_flops=False,
            profile_memory=False,
            with_stack=False,
            with_modules=False,
            use_kineto=False,
            use_cpu=True,
            experimental_config=None):
        """Configure the profiler; a disabled profiler skips all setup."""
        self.enabled: bool = enabled
        # A disabled profiler deliberately leaves every other attribute
        # unset — __enter__/__exit__ check self.enabled before touching them.
        if not self.enabled:
            return

        self.use_cuda = use_cuda
        self.function_events: Optional[EventList] = None
        self.kineto_results: Optional[_ProfilerResult] = None
        self.entered = False
        self.with_flops = with_flops
        # with_flops additionally turns on shape recording (OR, as in the
        # original in-place `|=`).
        self.record_shapes = record_shapes | with_flops
        self.profile_memory = profile_memory
        self.with_stack = with_stack
        self.with_modules = with_modules
        self.use_cpu = use_cpu
        self.experimental_config = (
            _ExperimentalConfig()
            if experimental_config is None
            else experimental_config)

        if not self.use_cpu:
            assert use_kineto, \
                "Device-only events supported only with Kineto (use_kineto=True)"

        # Silently downgrade to CPU-only profiling when CUDA is unavailable.
        if self.use_cuda and not torch.cuda.is_available():
            warn("CUDA is not available, disabling CUDA profiling")
            self.use_cuda = False

        self.kineto_activities = set()
        if self.use_cpu:
            self.kineto_activities.add(ProfilerActivity.CPU)

        self.profiler_kind = ProfilerState.KINETO
        if self.use_cuda:
            cuda_via_kineto = (
                use_kineto
                and ProfilerActivity.CUDA in _supported_activities())
            if cuda_via_kineto:
                self.kineto_activities.add(ProfilerActivity.CUDA)
            else:
                # Legacy per-op CUDA timing needs CPU-side bookkeeping.
                assert self.use_cpu, "Legacy CUDA profiling requires use_cpu=True"
                self.profiler_kind = ProfilerState.KINETO_GPU_FALLBACK

        assert len(self.kineto_activities) > 0, \
            "No activities specified for the profiler"
Example 3
0
 def __enter__(self):
     """Begin an NVTX profiling session.

     Returns self when profiling is active, or None when disabled (so the
     ``with`` block becomes a no-op). Raises RuntimeError on nested entry —
     this manager is not reentrant.
     """
     if self.enabled:
         if self.entered:
             raise RuntimeError(
                 "NVTX annotation context manager is not reentrant")
         self.entered = True
         # Wait for outstanding CUDA work so it is not attributed to
         # operations recorded after profiling starts.
         torch.cuda.synchronize()
         profiler_config = ProfilerConfig(
             ProfilerState.NVTX, self.record_shapes,
             False, False, False, False, _ExperimentalConfig())
         _enable_profiler(profiler_config, set())
         return self