Example No. 1
def _get_final_result():
    result = nni.load(nni.runtime.platform.test._last_metric)['value']
    if isinstance(result, list):
        return [float(_) for _ in result]
    else:
        if isinstance(result, str) and '[' in result:
            return nni.load(result)
        return [float(result)]
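Note: the helper above undoes a round trip through nni.dump/nni.load. A minimal sketch of that round trip (the metric payload is illustrative, not taken from the test suite):

import nni

metric = {'value': [0.9, 0.91, 0.95]}   # hypothetical reported metric
serialized = nni.dump(metric)           # JSON string
assert nni.load(serialized)['value'] == [0.9, 0.91, 0.95]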
Example No. 2
def test_type():
    assert nni.dump(
        torch.optim.Adam) == '{"__nni_type__": "path:torch.optim.adam.Adam"}'
    assert nni.load(
        '{"__nni_type__": "path:torch.optim.adam.Adam"}') == torch.optim.Adam
    assert Foo == nni.load(nni.dump(Foo))
    assert nni.dump(math.floor) == '{"__nni_type__": "path:math.floor"}'
    assert nni.load('{"__nni_type__": "path:math.floor"}') == math.floor
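The test shows that classes and functions are dumped by their import path under the "__nni_type__" key. Assuming the same "path:" convention holds, another stdlib function should behave the same way (a sketch, not part of the test):

import math
import nni

s = nni.dump(math.ceil)
assert s == '{"__nni_type__": "path:math.ceil"}'
assert nni.load(s) == math.ceil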
Example No. 3
def test_basic_unit_and_custom_import():
    module = ImportTest(3, 0.5)
    ss = nni.dump(module)
    assert ss == r'{"__symbol__": "path:imported.model.ImportTest", "__kwargs__": {"foo": 3, "bar": 0.5}}'
    assert nni.load(nni.dump(module)) == module

    import nni.retiarii.nn.pytorch as nn
    module = nn.Conv2d(3, 10, 3, bias=False)
    ss = nni.dump(module)
    assert ss == r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", "__kwargs__": {"in_channels": 3, "out_channels": 10, "kernel_size": 3, "bias": false}}'
    assert nni.load(ss).bias is None
Example No. 4
def test_custom_class():
    module = nni.trace(Foo)(3)
    assert nni.load(nni.dump(module)) == module
    module = nni.trace(Foo)(b=2, a=1)
    assert nni.load(nni.dump(module)) == module

    module = nni.trace(Foo)(Foo(1), 5)
    dumped_module = nni.dump(module)
    module = nni.load(dumped_module)
    assert module.bb[0] == module.bb[999] == 6

    module = nni.trace(Foo)(nni.trace(Foo)(1), 5)
    dumped_module = nni.dump(module)
    assert nni.load(dumped_module) == module
Example No. 5
    def import_data(self, data):  # for resuming experiment
        if isinstance(data, str):
            data = nni.load(data)
        for trial in data:
            if isinstance(trial, str):
                trial = nni.load(trial)
            param = format_parameters(trial['parameter'], self.space)
            loss = trial['value']
            if isinstance(loss, dict) and 'default' in loss:
                loss = loss['default']
            if self.optimize_mode is OptimizeMode.Maximize:
                loss = -loss
            for key, value in param.items():
                self._history[key].append(Record(value, loss))
        _logger.info(f'Replayed {len(data)} trials')
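A sketch of the payload shape import_data accepts, inferred from the code above: a JSON string (or list) of trials, each carrying 'parameter' and 'value', where 'value' may be a plain number or a dict with a 'default' key. The field values and the tuner instance are illustrative:

import nni

resume_data = nni.dump([
    {'parameter': {'x': 0.5, 'y': 3}, 'value': 0.87},
    {'parameter': {'x': 0.1, 'y': 7}, 'value': {'default': 0.91}},
])
# tuner.import_data(resume_data)  # hypothetical tuner exposing the method above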
Example No. 6
def test_custom_class():
    module = nni.trace(Foo)(3)
    assert nni.load(nni.dump(module)) == module
    module = nni.trace(Foo)(b=2, a=1)
    assert nni.load(nni.dump(module)) == module

    module = nni.trace(Foo)(Foo(1), 5)
    dumped_module = nni.dump(module)
    assert len(
        dumped_module
    ) > 200  # should not be too long if the serialization is correct

    module = nni.trace(Foo)(nni.trace(Foo)(1), 5)
    dumped_module = nni.dump(module)
    assert nni.load(dumped_module) == module
Example No. 7
def test_dataset():
    dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True)
    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)

    dumped_ans = {
        "__symbol__": "path:torch.utils.data.dataloader.DataLoader",
        "__kwargs__": {
            "dataset": {
                "__symbol__": "path:torchvision.datasets.mnist.MNIST",
                "__kwargs__": {
                    "root": "data/mnist",
                    "train": False,
                    "download": True
                }
            },
            "batch_size": 10
        }
    }
    print(nni.dump(dataloader))
    print(nni.dump(dumped_ans))
    assert nni.dump(dataloader) == nni.dump(dumped_ans)
    dataloader = nni.load(nni.dump(dumped_ans))
    assert isinstance(dataloader, DataLoader)

    dataset = nni.trace(MNIST)(root='data/mnist',
                               train=False,
                               download=True,
                               transform=nni.trace(transforms.Compose)([
                                   nni.trace(transforms.ToTensor)(),
                                   nni.trace(transforms.Normalize)((0.1307, ),
                                                                   (0.3081, ))
                               ]))
    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
    x, y = next(iter(nni.load(nni.dump(dataloader))))
    assert x.size() == torch.Size([10, 1, 28, 28])
    assert y.size() == torch.Size([10])

    dataset = nni.trace(MNIST)(root='data/mnist',
                               train=False,
                               download=True,
                               transform=nni.trace(transforms.Compose)([
                                   transforms.ToTensor(),
                                   transforms.Normalize((0.1307, ), (0.3081, ))
                               ]))
    dataloader = nni.trace(DataLoader)(dataset, batch_size=10)
    x, y = next(iter(nni.load(nni.dump(dataloader))))
    assert x.size() == torch.Size([10, 1, 28, 28])
    assert y.size() == torch.Size([10])
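Because nni.trace records constructor arguments, the nested DataLoader above serializes to nested "__symbol__"/"__kwargs__" JSON rather than a pickle blob. A smaller sketch in the same spirit, round-tripping just the traced dataset (the path follows the example; download=True needs network access):

import nni
from torchvision.datasets import MNIST

dataset = nni.trace(MNIST)(root='data/mnist', train=False, download=True)
restored = nni.load(nni.dump(dataset))
assert isinstance(restored, MNIST)
assert restored.train is False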
Example No. 8
def test_nested_class():
    a = SimpleClass(1, 2)
    b = SimpleClass(a)
    assert b._a._a == 1
    dump_str = nni.dump(b)
    b = nni.load(dump_str)
    assert 'SimpleClass object at' in repr(b)
    assert b._a._a == 1
Example No. 9
def test_lightning_earlystop():
    import nni.retiarii.evaluator.pytorch.lightning as pl
    from pytorch_lightning.callbacks.early_stopping import EarlyStopping
    trainer = pl.Trainer(
        callbacks=[nni.trace(EarlyStopping)(monitor="val_loss")])
    trainer = nni.load(nni.dump(trainer))
    assert any(
        isinstance(callback, EarlyStopping) for callback in trainer.callbacks)
Example No. 10
    def _process_value(value) -> Any:  # hopefully a float
        value = nni.load(value)
        if isinstance(value, dict):
            if 'default' in value:
                return value['default']
            else:
                return value
        return value
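The helper unwraps NNI's convention of reporting a dict whose 'default' entry is the primary metric. A sketch of the two value shapes it handles (the numbers are illustrative):

import nni

assert nni.load('{"default": 0.93, "val_acc": 0.95}')['default'] == 0.93
assert nni.load('0.93') == 0.93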
Example No. 11
    def read_file(self):
        '''load config from local file'''
        if os.path.exists(self.experiment_file):
            try:
                with open(self.experiment_file, 'r') as file:
                    return nni.load(fp=file)
            except ValueError:
                return {}
        return {}
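A sketch of the write/read cycle this method assumes: the file holds the nni.dump JSON of a config dict, read back with the fp= form of nni.load shown above (the file name and config keys are illustrative):

import nni

config = {'experimentName': 'demo', 'maxTrialNumber': 10}
with open('.experiment', 'w') as f:
    f.write(nni.dump(config))
with open('.experiment') as f:
    assert nni.load(fp=f) == config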
Example No. 12
def test_lightning_earlystop():
    import nni.retiarii.evaluator.pytorch.lightning as pl
    from pytorch_lightning.callbacks.early_stopping import EarlyStopping
    trainer = pl.Trainer(
        callbacks=[nni.trace(EarlyStopping)(monitor="val_loss")])
    pickle_size_limit = 4096 if sys.platform == 'linux' else 32768
    trainer = nni.load(nni.dump(trainer, pickle_size_limit=pickle_size_limit))
    assert any(
        isinstance(callback, EarlyStopping) for callback in trainer.callbacks)
Example No. 13
    def test_submit_models(self):
        _reset()
        nni.retiarii.debug_configs.framework = 'pytorch'
        os.makedirs('generated', exist_ok=True)
        import nni.runtime.platform.test as tt
        protocol._set_out_file(
            open('generated/debug_protocol_out_file.py', 'wb'))
        protocol._set_in_file(
            open('generated/debug_protocol_out_file.py', 'rb'))

        models = _load_mnist(2)

        advisor = RetiariiAdvisor('ws://_unittest_placeholder_')
        advisor._channel = protocol.LegacyCommandChannel()
        advisor.default_worker.start()
        advisor.assessor_worker.start()

        remote = RemoteConfig(machine_list=[])
        remote.machine_list.append(
            RemoteMachineConfig(host='test', gpu_indices=[0, 1, 2, 3]))
        cgo_engine = CGOExecutionEngine(training_service=remote,
                                        batch_waiting_time=0)
        set_execution_engine(cgo_engine)
        submit_models(*models)
        time.sleep(3)

        if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
            cmd, data = protocol.receive()
            params = nni.load(data)

            tt.init_params(params)

            trial_thread = threading.Thread(
                target=CGOExecutionEngine.trial_execute_graph)
            trial_thread.start()
            last_metric = None
            while True:
                time.sleep(1)
                if tt._last_metric:
                    metric = tt.get_last_metric()
                    if metric == last_metric:
                        continue
                    if 'value' in metric:
                        metric['value'] = json.dumps(metric['value'])
                    advisor.handle_report_metric_data(metric)
                    last_metric = metric
                if not trial_thread.is_alive():
                    trial_thread.join()
                    break

            trial_thread.join()

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()
        cgo_engine.join()
Example No. 14
def test_nested_class():
    a = SimpleClass(1, 2)
    b = SimpleClass(a)
    assert b._a._a == 1
    dump_str = nni.dump(b)
    b = nni.load(dump_str)
    assert repr(
        b
    ) == 'SerializableObject(type=SimpleClass, a=SerializableObject(type=SimpleClass, a=1, b=2))'
    assert b.get()._a._a == 1
Example No. 15
def test_ordered_json():
    items = [
        ('a', 1),
        ('c', 3),
        ('b', 2),
    ]
    orig = OrderedDict(items)
    json = nni.dump(orig)
    loaded = nni.load(json)
    assert list(loaded.items()) == items
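Since regular dicts also preserve insertion order in Python 3.7+, the same round-trip guarantee applies without OrderedDict (a sketch):

import nni

orig = {'a': 1, 'c': 3, 'b': 2}
assert list(nni.load(nni.dump(orig)).items()) == [('a', 1), ('c', 3), ('b', 2)]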
Example No. 16
    def test_submit_models(self):
        _reset()
        nni.retiarii.debug_configs.framework = 'pytorch'
        os.makedirs('generated', exist_ok=True)
        from nni.runtime import protocol
        import nni.runtime.platform.test as tt
        protocol._set_out_file(
            open('generated/debug_protocol_out_file.py', 'wb'))
        protocol._set_in_file(
            open('generated/debug_protocol_out_file.py', 'rb'))

        models = _load_mnist(2)

        advisor = RetiariiAdvisor()
        cgo_engine = CGOExecutionEngine(devices=[
            GPUDevice("test", 0),
            GPUDevice("test", 1),
            GPUDevice("test", 2),
            GPUDevice("test", 3)
        ],
                                        batch_waiting_time=0)
        set_execution_engine(cgo_engine)
        submit_models(*models)
        time.sleep(3)

        if torch.cuda.is_available() and torch.cuda.device_count() >= 2:
            cmd, data = protocol.receive()
            params = nni.load(data)

            tt.init_params(params)

            trial_thread = threading.Thread(
                target=CGOExecutionEngine.trial_execute_graph)
            trial_thread.start()
            last_metric = None
            while True:
                time.sleep(1)
                if tt._last_metric:
                    metric = tt.get_last_metric()
                    if metric == last_metric:
                        continue
                    if 'value' in metric:
                        metric['value'] = json.dumps(metric['value'])
                    advisor.handle_report_metric_data(metric)
                    last_metric = metric
                if not trial_thread.is_alive():
                    trial_thread.join()
                    break

            trial_thread.join()

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()
        cgo_engine.join()
Example No. 17
    def handle_report_metric_data(self, data):
        """
        Parameters
        ----------
        data:
            it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.

        Raises
        ------
        ValueError
            Data type not supported
        """
        if 'value' in data:
            data['value'] = nni.load(data['value'])
        # multiphase? need to check
        if data['type'] == MetricType.REQUEST_PARAMETER:
            assert multi_phase_enabled()
            assert data['trial_job_id'] is not None
            assert data['parameter_index'] is not None
            assert data['trial_job_id'] in self.job_id_para_id_map
            self._handle_trial_end(
                self.job_id_para_id_map[data['trial_job_id']])
            ret = self._get_one_trial_job()
            if data['trial_job_id'] is not None:
                ret['trial_job_id'] = data['trial_job_id']
            if data['parameter_index'] is not None:
                ret['parameter_index'] = data['parameter_index']
            self.job_id_para_id_map[data['trial_job_id']] = ret['parameter_id']
            send(CommandType.SendTrialJobParameter, nni.dump(ret))
        else:
            value = extract_scalar_reward(data['value'])
            bracket_id, i, _ = data['parameter_id'].split('_')

            # add <trial_job_id, parameter_id> to self.job_id_para_id_map here,
            # because when the first parameter_id is created, trial_job_id is not known yet.
            if data['trial_job_id'] in self.job_id_para_id_map:
                assert self.job_id_para_id_map[
                    data['trial_job_id']] == data['parameter_id']
            else:
                self.job_id_para_id_map[
                    data['trial_job_id']] = data['parameter_id']

            if data['type'] == MetricType.FINAL:
                # sys.maxsize indicates this value is from FINAL metric data, because data['sequence'] from FINAL metric
                # and PERIODICAL metric are independent, thus, not comparable.
                self.brackets[bracket_id].set_config_perf(
                    int(i), data['parameter_id'], sys.maxsize, value)
                self.completed_hyper_configs.append(data)
            elif data['type'] == MetricType.PERIODICAL:
                self.brackets[bracket_id].set_config_perf(
                    int(i), data['parameter_id'], data['sequence'], value)
            else:
                raise ValueError('Data type not supported: {}'.format(
                    data['type']))
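A sketch of the metric payload this handler expects, inferred from the code above: parameter ids follow a '<bracket_id>_<i>_<suffix>' pattern (hence the split('_')), and 'value' arrives as a dumped string. The concrete values and the advisor instance are illustrative:

import nni

metric = {
    'parameter_id': '0_1_2',             # '<bracket_id>_<i>_<suffix>'
    'trial_job_id': 'trial0',
    'type': MetricType.FINAL,            # the same enum used in the handler above
    'sequence': 5,
    'value': nni.dump({'default': 0.92}),
}
# advisor.handle_report_metric_data(metric)  # hypothetical advisor instance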
Example No. 18
def test_simple_class():
    instance = SimpleClass(1, 2)
    assert instance._a == 1
    assert instance._b == 2

    dump_str = nni.dump(instance)
    assert '"__kwargs__": {"a": 1, "b": 2}' in dump_str
    assert '"__symbol__"' in dump_str
    instance = nni.load(dump_str)
    assert instance._a == 1
    assert instance._b == 2
Example No. 19
def test_model_wrapper_serialize():
    from nni.retiarii import model_wrapper

    @model_wrapper
    class Model(nn.Module):
        def __init__(self, in_channels):
            super().__init__()
            self.in_channels = in_channels

    model = Model(3)
    dumped = nni.dump(model)
    loaded = nni.load(dumped)
    assert loaded.in_channels == 3
Example No. 20
    def handle_import_data(self, data):
        """Import additional data for tuning

        Parameters
        ----------
        data:
            a list of dictionaries, each of which has at least two keys, 'parameter' and 'value'

        Raises
        ------
        AssertionError
            data doesn't have the required keys 'parameter' and 'value'
        """
        for entry in data:
            entry['value'] = nni.load(entry['value'])
        _completed_num = 0
        for trial_info in data:
            logger.info("Importing data, current processing progress %s / %s",
                        _completed_num, len(data))
            _completed_num += 1
            assert "parameter" in trial_info
            _params = trial_info["parameter"]
            assert "value" in trial_info
            _value = trial_info['value']
            if not _value:
                logger.info(
                    "Useless trial data, value is %s, skip this trial data.",
                    _value)
                continue
            _value = extract_scalar_reward(_value)
            budget_exist_flag = False
            barely_params = dict()
            for keys in _params:
                if keys == _KEY:
                    _budget = _params[keys]
                    budget_exist_flag = True
                else:
                    barely_params[keys] = _params[keys]
            if not budget_exist_flag:
                _budget = self.max_budget
                logger.info("Set \"TRIAL_BUDGET\" value to %s (max budget)",
                            self.max_budget)
            if self.optimize_mode is OptimizeMode.Maximize:
                reward = -_value
            else:
                reward = _value
            self.cg.new_result(loss=reward,
                               budget=_budget,
                               parameters=barely_params,
                               update_model=True)
        logger.info("Successfully import tuning data to BOHB advisor.")
Example No. 21
    def handle_trial_end(self, data):
        """
        Parameters
        ----------
        data: dict()
            it has three keys: trial_job_id, event, hyper_params
            trial_job_id: the id generated by training service
            event: the job's state
            hyper_params: the hyperparameters (a string) generated and returned by tuner
        """
        hyper_params = nni.load(data['hyper_params'])
        self._handle_trial_end(hyper_params['parameter_id'])
        if data['trial_job_id'] in self.job_id_para_id_map:
            del self.job_id_para_id_map[data['trial_job_id']]
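A sketch of the trial-end event this callback consumes, per the docstring above: 'hyper_params' is the dumped string originally produced by the tuner, from which only 'parameter_id' is read here. The other fields and the 'parameters' key are illustrative:

import nni

event = {
    'trial_job_id': 'trial0',
    'event': 'SUCCEEDED',
    'hyper_params': nni.dump({'parameter_id': '0_0_1', 'parameters': {'lr': 0.01}}),
}
# advisor.handle_trial_end(event)  # invoked by the NNI dispatcher, not user code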
Example No. 22
def test_function():
    t = nni.trace(math.sqrt, kw_only=False)(3)
    assert 1 < t < 2
    assert t.trace_symbol == math.sqrt
    assert t.trace_args == [3]
    t = nni.load(nni.dump(t))
    assert 1 < t < 2
    assert not is_traceable(t)  # trace not recovered, expected, limitation

    def simple_class_factory(bb=3.):
        return SimpleClass(1, bb)

    t = nni.trace(simple_class_factory)(4)
    ts = nni.dump(t)
    assert '__kwargs__' in ts
    t = nni.load(ts)
    assert t._a == 1
    assert is_traceable(t)
    t = t.trace_copy()
    assert is_traceable(t)
    assert t.trace_symbol(10)._b == 10
    assert t.trace_kwargs['bb'] == 4
    assert is_traceable(t.trace_copy())
Example No. 23
def _load_mnist(n_models: int = 1):
    path = Path('ut/nas/mnist_pytorch.json')
    with open(path) as f:
        mnist_model = Model._load(nni.load(fp=f))
        mnist_model.evaluator = _new_trainer()

    if n_models == 1:
        return mnist_model
    else:
        models = [mnist_model]
        for i in range(n_models - 1):
            forked_model = mnist_model.fork()
            forked_model.evaluator = _new_trainer()
            models.append(forked_model)
        return models
Example No. 24
    def handle_trial_end(self, data):
        """receive the information of trial end and generate next configuaration.

        Parameters
        ----------
        data: dict()
            it has three keys: trial_job_id, event, hyper_params
            trial_job_id: the id generated by training service
            event: the job's state
            hyper_params: the hyperparameters (a string) generated and returned by tuner
        """
        logger.debug('Tuner handle trial end, result is %s', data)
        hyper_params = nni.load(data['hyper_params'])
        self._handle_trial_end(hyper_params['parameter_id'])
        if data['trial_job_id'] in self.job_id_para_id_map:
            del self.job_id_para_id_map[data['trial_job_id']]
Example No. 25
def test_external_class():
    from collections import OrderedDict
    d = nni.trace(kw_only=False)(OrderedDict)([('a', 1), ('b', 2)])
    assert d['a'] == 1
    assert d['b'] == 2
    dump_str = nni.dump(d)
    assert dump_str == '{"a": 1, "b": 2}'

    conv = nni.trace(torch.nn.Conv2d)(3, 16, 3)
    assert conv.in_channels == 3
    assert conv.out_channels == 16
    assert conv.kernel_size == (3, 3)
    assert nni.dump(conv) == \
        r'{"__symbol__": "path:torch.nn.modules.conv.Conv2d", ' \
        r'"__kwargs__": {"in_channels": 3, "out_channels": 16, "kernel_size": 3}}'

    conv = nni.load(nni.dump(conv))
    assert conv.kernel_size == (3, 3)
Example No. 26
def test_get():
    @nni.trace
    class Foo:
        def __init__(self, a=1):
            self._a = a

        def bar(self):
            return self._a + 1

    obj = Foo(3)
    assert nni.load(nni.dump(obj)).bar() == 4
    obj1 = obj.trace_copy()
    with pytest.raises(AttributeError):
        obj1.bar()
    obj1.trace_kwargs['a'] = 5
    obj1 = obj1.get()
    assert obj1.bar() == 6
    obj2 = obj1.trace_copy()
    obj2.trace_kwargs['a'] = -1
    assert obj2.get().bar() == 0
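Building on the trace_copy/get pattern above, a mutated copy can also be dumped and reloaded. The class below is a hypothetical stand-in mirroring Foo; the final assertion is a sketch that assumes dump/load of decorated classes behaves as in the first assertion of the test:

import nni

@nni.trace
class Bar:
    def __init__(self, a=1):
        self._a = a

    def bar(self):
        return self._a + 1

obj = Bar(3)
copy = obj.trace_copy()
copy.trace_kwargs['a'] = 10
assert copy.get().bar() == 11
assert nni.load(nni.dump(copy.get())).bar() == 11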
Example No. 27
def test_unserializable():
    a = UnserializableSimpleClass()
    dump_str = nni.dump(a)
    a = nni.load(dump_str)
    assert a._a == 1
Example No. 28
    def refresh_config(self):
        '''refresh to get latest config'''
        sql = 'select params from ExperimentProfile where id=? order by revision DESC'
        args = (self.experiment_id, )
        self.config = config_v0_to_v1(
            nni.load(self.conn.cursor().execute(sql, args).fetchone()[0]))
Example No. 29
    def handle_trial_end(self, data):
        _logger.debug('Trial end: %s', data)
        self.trial_end_callback(
            nni.load(data['hyper_params'])['parameter_id'],  # pylint: disable=not-callable
            data['event'] == 'SUCCEEDED')
Example No. 30
    def handle_report_metric_data(self, data):
        """reveice the metric data and update Bayesian optimization with final result

        Parameters
        ----------
        data:
            it is an object which has keys 'parameter_id', 'value', 'trial_job_id', 'type', 'sequence'.

        Raises
        ------
        ValueError
            Data type not supported
        """
        logger.debug('handle report metric data = %s', data)
        if 'value' in data:
            data['value'] = nni.load(data['value'])
        if data['type'] == MetricType.REQUEST_PARAMETER:
            assert multi_phase_enabled()
            assert data['trial_job_id'] is not None
            assert data['parameter_index'] is not None
            assert data['trial_job_id'] in self.job_id_para_id_map
            self._handle_trial_end(
                self.job_id_para_id_map[data['trial_job_id']])
            ret = self._get_one_trial_job()
            if ret is None:
                self.unsatisfied_jobs.append({
                    'trial_job_id':
                    data['trial_job_id'],
                    'parameter_index':
                    data['parameter_index']
                })
            else:
                ret['trial_job_id'] = data['trial_job_id']
                ret['parameter_index'] = data['parameter_index']
                # update parameter_id in self.job_id_para_id_map
                self.job_id_para_id_map[
                    data['trial_job_id']] = ret['parameter_id']
                send(CommandType.SendTrialJobParameter, nni.dump(ret))
        else:
            assert 'value' in data
            value = extract_scalar_reward(data['value'])
            if self.optimize_mode is OptimizeMode.Maximize:
                reward = -value
            else:
                reward = value
            assert 'parameter_id' in data
            s, i, _ = data['parameter_id'].split('_')
            logger.debug('bracket id = %s, metrics value = %s, type = %s', s,
                         value, data['type'])
            s = int(s)

            # add <trial_job_id, parameter_id> to self.job_id_para_id_map here,
            # because when the first parameter_id is created, trial_job_id is not known yet.
            if data['trial_job_id'] in self.job_id_para_id_map:
                assert self.job_id_para_id_map[
                    data['trial_job_id']] == data['parameter_id']
            else:
                self.job_id_para_id_map[
                    data['trial_job_id']] = data['parameter_id']

            assert 'type' in data
            if data['type'] == MetricType.FINAL:
                # sys.maxsize indicates this value is from FINAL metric data, because data['sequence'] from FINAL metric
                # and PERIODICAL metric are independent, thus, not comparable.
                assert 'sequence' in data
                self.brackets[s].set_config_perf(int(i), data['parameter_id'],
                                                 sys.maxsize, value)
                self.completed_hyper_configs.append(data)

                _parameters = self.parameters[data['parameter_id']]
                _parameters.pop(_KEY)
                # update BO with loss, max_s budget, hyperparameters
                self.cg.new_result(loss=reward,
                                   budget=data['sequence'],
                                   parameters=_parameters,
                                   update_model=True)
            elif data['type'] == MetricType.PERIODICAL:
                self.brackets[s].set_config_perf(int(i), data['parameter_id'],
                                                 data['sequence'], value)
            else:
                raise ValueError('Data type not supported: {}'.format(
                    data['type']))