def tune(self, n_trial, **kwargs):
  from antares.common import Mock

  current_step = 0
  while current_step < n_trial and self.has_next():
    # Ask the search strategy for the next batch of candidate configs.
    batch_size = min(self.batch_size, n_trial - current_step)
    configs = self.next_batch(batch_size)
    inputs = []
    for config in configs:
      input_format = Mock()
      input_format.config = config
      inputs.append(input_format)
    # Measure the whole batch, then feed the results back into the strategy.
    results = self.measure_batch(inputs)
    self.update(inputs, results)
    current_step += batch_size
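# Hedged usage sketch (not from the original source): tune() above assumes a
# tuner object that provides has_next / next_batch / measure_batch / update and
# a batch_size attribute, much like TVM autotvm's Tuner interface.  The class
# below is a hypothetical stand-in showing that minimal contract; the names
# RandomOrderTuner and `space` exist only for this illustration.
class RandomOrderTuner:
  def __init__(self, space, batch_size=8):
    self.space = list(space)          # candidate configs to try, in order
    self.batch_size = batch_size
    self.visited = 0

  def has_next(self):
    return self.visited < len(self.space)

  def next_batch(self, batch_size):
    batch = self.space[self.visited:self.visited + batch_size]
    self.visited += len(batch)
    return batch

  def measure_batch(self, inputs):
    # A real tuner would compile and time inputs[i].config here.
    return [float('inf') for _ in inputs]

  def update(self, inputs, results):
    # A real tuner would feed the timings back into its search strategy.
    pass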
def _callback(op):
  # Only schedule the non-injective (anchor) op; its elementwise producers get inlined.
  if op.tag != 'antares_injective':
    output_spec = op.output(0)
    # Inline every injective (non-reduction) producer into its consumer.
    for inp in sch[output_spec].op.input_tensors:
      if isinstance(inp.op, te.tensor.ComputeOp) and not inp.op.reduce_axis:
        sch[inp].compute_inline()
    # Collect the scheduling context and hand it to the native scheduler.
    attrs = Mock()
    attrs.inputs = inputs
    attrs.outputs = [output_spec]
    attrs.scheduler = sch
    attrs.auto_config = cfg
    attrs.backend = backend
    attrs.ir = program
    attrs.options = options
    attrs.blend = ''
    # Parse the loop extent out of the IterVar's string form, e.g. "... ext=128)".
    attrs.get_extent = lambda axis: int(str(axis).split('ext=')[-1].split(')')[0])
    AntaresGlobal.attrs = attrs
    do_native_scheduling(attrs)
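# Hedged illustration (not part of the original file): the compute_inline()
# call above folds an elementwise producer into its consumer.  A standalone
# TVM snippet showing the same transformation; A, T, B, s are example-only names.
from tvm import te

A = te.placeholder((64,), name='A')
T = te.compute((64,), lambda i: A[i] + 1, name='T')   # injective, no reduce_axis
B = te.compute((64,), lambda i: T[i] * 2, name='B')
s = te.create_schedule(B.op)
s[T].compute_inline()   # T is now computed inside B's loop, as sch[inp].compute_inline() does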
def _callback(op, explicit_ops):
  # Collect the scheduling context and hand it to the native scheduler.
  attrs = Mock()
  attrs.inputs = inputs
  attrs.explicit_ops = explicit_ops
  attrs.outputs = [op.output(0)]
  attrs.scheduler = sch
  attrs.auto_config = cfg
  attrs.backend = backend
  attrs.ir = program
  attrs.options = options
  attrs.blend = ''
  # Loop extents are read directly from the IterVar's range.
  attrs.get_extent = lambda axis: int(axis.dom.extent)
  AntaresGlobal.attrs = attrs
  do_native_scheduling(attrs)
def _callback(explicit_ops):
  # Collect the scheduling context and hand it to the native scheduler.
  attrs = Mock()
  attrs.device_props = get_device_props()
  attrs.inputs = list(inputs)
  attrs.outputs = list(outputs)
  attrs.explicit_ops = explicit_ops
  attrs.scheduler = sch
  attrs.auto_config = AntaresGlobal.auto_config
  attrs.backend = backend
  attrs.ir = program
  attrs.options = options
  attrs.blend = ''
  attrs.get_extent = lambda axis: int(axis.dom.extent)

  def get_lower():
    # Lowered IR text of the current schedule, with the trailing metadata section stripped.
    return str(tvm.lower(sch, attrs.inputs + attrs.outputs, simple_mode=True)).split('#[metadata]')[0]

  attrs.get_lower = get_lower
  AntaresGlobal.attrs = attrs
  do_native_scheduling(attrs)
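# Hedged illustration (not part of the original file): get_extent and get_lower
# above rely on standard TVM objects, i.e. an IterVar's range (axis.dom.extent)
# and tvm.lower(..., simple_mode=True).  A tiny self-contained schedule
# demonstrates both; n, A, B, s are example-only names.
import tvm
from tvm import te

n = 128
A = te.placeholder((n,), name='A')
B = te.compute((n,), lambda i: A[i] * 2, name='B')
s = te.create_schedule(B.op)

axis = B.op.axis[0]
print(int(axis.dom.extent))                    # 128, the value get_extent(axis) would return
print(tvm.lower(s, [A, B], simple_mode=True))  # lowered IR text, as consumed by get_lower()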