Example no. 1
0
 def _model_complete(self, model: Model):
     """Simulate the completion of a model's training run.

     Sleeps for a random sub-second interval, then either marks the
     model as failed (with probability ``self.failure_prob``) or assigns
     a random metric and marks it trained. Finally releases the resource
     slot this model occupied.
     """
     time.sleep(random.uniform(0, 1))
     failed = random.uniform(0, 1) < self.failure_prob
     if failed:
         model.status = ModelStatus.Failed
     else:
         model.metric = random.uniform(0, 1)
         model.status = ModelStatus.Trained
     self._resource_left += 1
Example no. 2
0
    def test_py_execution_engine(self):
        """Smoke-test the pure-Python execution engine end to end.

        Submits the same tiny LayerChoice graph twice, then stops the
        advisor and waits for its worker threads to finish.
        """
        # Minimal graph IR: a single LayerChoice node with two candidates.
        graph_ir = {
            '_model': {
                'inputs': None,
                'outputs': None,
                'nodes': {
                    'layerchoice_1': {
                        'operation': {
                            'type': 'LayerChoice',
                            'parameters': {'candidates': ['0', '1']},
                        }
                    }
                },
                'edges': [],
            }
        }

        advisor = RetiariiAdvisor()
        set_execution_engine(PurePythonExecutionEngine())
        model = Model._load(graph_ir)
        model.python_class = object
        submit_models(model, model)

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()
Example no. 3
0
    def test_py_execution_engine(self):
        """Smoke-test the pure-Python execution engine with a fresh advisor.

        Resets the module-level advisor/engine singletons, starts the
        advisor workers manually, submits the same tiny LayerChoice graph
        twice, then shuts the workers down.
        """
        # Clear global singletons so this test is isolated from earlier ones.
        nni.retiarii.integration_api._advisor = None
        nni.retiarii.execution.api._execution_engine = None

        advisor = RetiariiAdvisor('ws://_unittest_placeholder_')
        advisor._channel = LegacyCommandChannel()
        advisor.default_worker.start()
        advisor.assessor_worker.start()

        set_execution_engine(PurePythonExecutionEngine())

        # Minimal graph IR: a single LayerChoice node with two candidates.
        graph_ir = {
            '_model': {
                'inputs': None,
                'outputs': None,
                'nodes': {
                    'layerchoice_1': {
                        'operation': {
                            'type': 'LayerChoice',
                            'parameters': {'candidates': ['0', '1']},
                        }
                    }
                },
                'edges': [],
            }
        }
        model = Model._load(graph_ir)
        model.evaluator = DebugEvaluator()
        model.python_class = object
        submit_models(model, model)

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()
Example no. 4
0
 def test_codegen(self):
     """Generated PyTorch code for the MNIST graph must match the checked-in reference."""
     graph_path = self.enclosing_dir / 'mnist_pytorch.json'
     with open(graph_path) as fp:
         script = model_to_pytorch_script(Model._load(json.load(fp)))
     reference_path = self.enclosing_dir / 'debug_mnist_pytorch.py'
     with open(reference_path) as fp:
         reference_script = fp.read()
     self.assertEqual(script.strip(), reference_script.strip())
Example no. 5
0
 def test_mnist_example_pytorch(self):
     """Generated PyTorch code for the MNIST graph must match the reference script."""
     with open('mnist_pytorch.json') as fp:
         script = model_to_pytorch_script(Model._load(json.load(fp)))
     with open('debug_mnist_pytorch.py') as fp:
         reference_script = fp.read()
     self.assertEqual(script.strip(), reference_script.strip())
Example no. 6
0
    def test_base_execution_engine(self):
        """Smoke-test the base execution engine by submitting the MNIST graph twice."""
        advisor = RetiariiAdvisor()
        set_execution_engine(BaseExecutionEngine())

        graph_path = self.enclosing_dir / 'mnist_pytorch.json'
        with open(graph_path) as fp:
            model = Model._load(json.load(fp))
        submit_models(model, model)

        # Signal the advisor to stop and wait for its worker threads.
        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()
Example no. 7
0
def _load_mnist(n_models: int = 1):
    """Load the converted MNIST model graph from JSON.

    Returns the single loaded model when ``n_models == 1``; otherwise a
    list containing the loaded model followed by ``n_models - 1`` forks.
    """
    path = Path(__file__).parent / 'converted_mnist_pytorch.json'
    with open(path) as fp:
        base_model = Model._load(json.load(fp))
    if n_models == 1:
        return base_model
    return [base_model] + [base_model.fork() for _ in range(n_models - 1)]
Example no. 8
0
 def test_trainer(self):
     """Run one training epoch of the debug MNIST model through the trainer."""
     # Make the generated module importable from this test's directory.
     sys.path.insert(0, Path(__file__).parent.as_posix())
     model_cls = import_('debug_mnist_pytorch._model')
     data_root = (Path(__file__).parent / 'data' / 'mnist').as_posix()
     trainer = PyTorchImageClassificationTrainer(
         model_cls(),
         dataset_kwargs={'root': data_root, 'download': True},
         dataloader_kwargs={'batch_size': 32},
         optimizer_kwargs={'lr': 1e-3},
         trainer_kwargs={'max_epochs': 1})
     trainer.fit()
Example no. 9
0
    def test_submit_models(self):
        """Submit the MNIST graph twice and record the protocol output to a file."""
        os.makedirs('generated', exist_ok=True)
        from nni.runtime import protocol
        out_path = Path(__file__).parent / 'generated/debug_protocol_out_file.py'
        # NOTE: the file is deliberately left open; the protocol layer keeps
        # writing to it while the advisor workers run.
        protocol._out_file = open(out_path, 'wb')

        advisor = RetiariiAdvisor()
        with open('mnist_pytorch.json') as fp:
            model = Model._load(json.load(fp))
        submit_models(model, model)

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()
Example no. 10
0
def _load_mnist(n_models: int = 1):
    """Load the MNIST model graph and attach a fresh trainer as evaluator.

    Returns the single loaded model when ``n_models == 1``; otherwise a
    list of the loaded model plus ``n_models - 1`` forks, each with its
    own newly created trainer.
    """
    path = Path(__file__).parent / 'mnist_pytorch.json'
    with open(path) as fp:
        base_model = Model._load(json.load(fp))
    base_model.evaluator = _new_trainer()

    if n_models == 1:
        return base_model

    def _forked():
        # Each fork gets its own evaluator instance.
        fork = base_model.fork()
        fork.evaluator = _new_trainer()
        return fork

    return [base_model] + [_forked() for _ in range(n_models - 1)]
Example no. 11
0
def _load_mnist(n_models: int = 1):
    """Load the MNIST model graph (via ``nni.load``) and attach a trainer.

    Returns the single loaded model when ``n_models == 1``; otherwise a
    list of the loaded model plus ``n_models - 1`` forks, each with its
    own newly created trainer.
    """
    path = Path('ut/nas/mnist_pytorch.json')
    with open(path) as fp:
        base_model = Model._load(nni.load(fp=fp))
    base_model.evaluator = _new_trainer()

    if n_models == 1:
        return base_model

    def _forked():
        # Each fork gets its own evaluator instance.
        fork = base_model.fork()
        fork.evaluator = _new_trainer()
        return fork

    return [base_model] + [_forked() for _ in range(n_models - 1)]
Example no. 12
0
    def test_base_execution_engine(self):
        """Smoke-test the base execution engine with a freshly constructed advisor.

        Resets the module-level advisor/engine singletons, starts the
        advisor workers manually, submits the MNIST graph twice, then
        shuts the workers down.
        """
        # Clear global singletons so this test is isolated from earlier ones.
        nni.retiarii.integration_api._advisor = None
        nni.retiarii.execution.api._execution_engine = None

        advisor = RetiariiAdvisor('ws://_unittest_placeholder_')
        advisor._channel = LegacyCommandChannel()
        advisor.default_worker.start()
        advisor.assessor_worker.start()

        set_execution_engine(BaseExecutionEngine())
        graph_path = self.enclosing_dir / 'mnist_pytorch.json'
        with open(graph_path) as fp:
            model = Model._load(json.load(fp))
        submit_models(model, model)

        advisor.stopping = True
        advisor.default_worker.join()
        advisor.assessor_worker.join()