def fast_experiment(project: Project,
                    nb_name: str,
                    globs: dict,
                    return_files: bool = True,
                    default: str = "main.py",
                    **kwargs) -> Experiment:
    """Creates a Neptune ML experiment, wrapped with metadata.

    Args:
        project: Neptune Project
        nb_name: str, name of the current notebook to be recorded
        globs: dict of the global variables. Simply set globs = globals() and then pass it.
        return_files: bool, True if we want to send files recorded in the parameters.
        default: str, name of the default code file
        kwargs: additional args passed to Neptune ML when the experiment is created

    Yields:
        exp: Neptune ML experiment
    """
    # First we get the code cells
    codes = get_codes(nb_name, default=default)
    # We write them to separate files
    for fn, code in codes.items():
        with open(fn, "w") as file:
            file.write(code)
    codes = list(codes.keys())
    # We get the properties
    properties, files = get_properties_from_cells(nb_name, globs=globs, return_files=return_files)
    metadata = get_metadata()
    properties.update(metadata)
    properties["nb_name"] = nb_name
    # We convert the dict values to strings
    for k, v in properties.items():
        properties[k] = str(v)
    exp = project.create_experiment(params=properties, upload_source_files=codes, **kwargs)
    # We create the requirements file and send it
    create_requirements(nb_name)
    exp.send_artifact("requirements.txt")
    for fn in files:
        exp.send_artifact(fn)
    yield exp
    exp.stop()
    # We remove the code files
    for fn in codes:
        os.remove(fn)
    os.remove("requirements.txt")
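# Usage sketch (not part of the module above): because fast_experiment is a
# generator that yields the experiment and stops it afterwards, it can drive a
# `with` block once wrapped with contextlib.contextmanager. This assumes the
# helpers above are importable and that the legacy neptune-client API
# (neptune.init, Experiment.send_metric) is in use; the project and notebook
# names below are hypothetical.
from contextlib import contextmanager

import neptune  # legacy neptune-client

fast_experiment_ctx = contextmanager(fast_experiment)

project = neptune.init(project_qualified_name="my-workspace/sandbox")  # hypothetical project

with fast_experiment_ctx(project, nb_name="train.ipynb", globs=globals()) as exp:
    exp.send_metric("loss", 0.42)  # log metrics while the experiment is live
# On exit, the experiment is stopped and the temporary code/requirements files are removed.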
class TestProject(unittest.TestCase):
    def setUp(self):
        super(TestProject, self).setUp()
        self.backend = MagicMock()
        self.project = Project(backend=self.backend,
                               internal_id=a_uuid_string(),
                               namespace=a_string(),
                               name=a_string())
        self.current_directory = os.getcwd()

    def tearDown(self):
        os.chdir(self.current_directory)

    def test_get_members(self):
        # given
        member_usernames = [a_string() for _ in range(0, 2)]
        members = [
            a_registered_project_member(username) for username in member_usernames
        ]

        # and
        self.backend.get_project_members.return_value = members + [an_invited_project_member()]

        # when
        fetched_member_usernames = self.project.get_members()

        # then
        self.backend.get_project_members.assert_called_once_with(self.project.internal_id)

        # and
        self.assertEqual(member_usernames, fetched_member_usernames)

    def test_get_experiments_with_no_params(self):
        # given
        leaderboard_entries = [MagicMock() for _ in range(0, 2)]
        self.backend.get_leaderboard_entries.return_value = leaderboard_entries

        # when
        experiments = self.project.get_experiments()

        # then
        self.backend.get_leaderboard_entries.assert_called_once_with(
            project=self.project, ids=None, states=None, owners=None, tags=None,
            min_running_time=None)

        # and
        expected_experiments = [
            Experiment(self.backend, self.project, entry.id, entry.internal_id)
            for entry in leaderboard_entries
        ]
        self.assertEqual(expected_experiments, experiments)

    def test_get_experiments_with_scalar_params(self):
        # given
        leaderboard_entries = [MagicMock() for _ in range(0, 2)]
        self.backend.get_leaderboard_entries.return_value = leaderboard_entries

        # and
        params = dict(id=a_string(),
                      state='succeeded',
                      owner=a_string(),
                      tag=a_string(),
                      min_running_time=randint(1, 100))

        # when
        experiments = self.project.get_experiments(**params)

        # then
        expected_params = dict(project=self.project,
                               ids=[params['id']],
                               states=[params['state']],
                               owners=[params['owner']],
                               tags=[params['tag']],
                               min_running_time=params['min_running_time'])
        self.backend.get_leaderboard_entries.assert_called_once_with(**expected_params)

        # and
        expected_experiments = [
            Experiment(self.backend, self.project, entry.id, entry.internal_id)
            for entry in leaderboard_entries
        ]
        self.assertEqual(expected_experiments, experiments)

    def test_get_experiments_with_list_params(self):
        # given
        leaderboard_entries = [MagicMock() for _ in range(0, 2)]
        self.backend.get_leaderboard_entries.return_value = leaderboard_entries

        # and
        params = dict(id=a_string_list(),
                      state=['succeeded', 'failed'],
                      owner=a_string_list(),
                      tag=a_string_list(),
                      min_running_time=randint(1, 100))

        # when
        experiments = self.project.get_experiments(**params)

        # then
        expected_params = dict(project=self.project,
                               ids=params['id'],
                               states=params['state'],
                               owners=params['owner'],
                               tags=params['tag'],
                               min_running_time=params['min_running_time'])
        self.backend.get_leaderboard_entries.assert_called_once_with(**expected_params)

        # and
        expected_experiments = [
            Experiment(self.backend, self.project, entry.id, entry.internal_id)
            for entry in leaderboard_entries
        ]
        self.assertEqual(expected_experiments, experiments)

    def test_get_leaderboard(self):
        # given
        self.backend.get_leaderboard_entries.return_value = [LeaderboardEntry(some_exp_entry_dto)]

        # when
        leaderboard = self.project.get_leaderboard()

        # then
        self.backend.get_leaderboard_entries.assert_called_once_with(
            project=self.project, ids=None, states=None, owners=None, tags=None,
            min_running_time=None)

        # and
        expected_data = {0: some_exp_entry_row}
        expected_leaderboard = pd.DataFrame.from_dict(data=expected_data, orient='index')
        # pylint: disable=protected-access
        expected_leaderboard = expected_leaderboard.reindex(
            self.project._sort_leaderboard_columns(expected_leaderboard.columns),
            axis='columns')

        self.assertTrue(leaderboard.equals(expected_leaderboard))

    def test_sort_leaderboard_columns(self):
        # given
        columns_in_expected_order = [
            'id', 'name', 'created', 'finished', 'owner', 'notes', 'size', 'tags',
            'channel_abc', 'channel_def',
            'parameter_abc', 'parameter_def',
            'property_abc', 'property_def'
        ]

        # when
        # pylint: disable=protected-access
        sorted_columns = self.project._sort_leaderboard_columns(
            reversed(columns_in_expected_order))

        # then
        self.assertEqual(columns_in_expected_order, sorted_columns)

    def test_full_id(self):
        # expect
        self.assertEqual(self.project.namespace + '/' + self.project.name,
                         self.project.full_id)

    def test_to_string(self):
        # expect
        self.assertEqual('Project({})'.format(self.project.full_id), str(self.project))

    def test_repr(self):
        # expect
        self.assertEqual('Project({})'.format(self.project.full_id), repr(self.project))

    # pylint: disable=protected-access
    def test_get_current_experiment_from_stack(self):
        # given
        experiment = Munch(internal_id=a_uuid_string())

        # when
        self.project._push_new_experiment(experiment)

        # then
        self.assertEqual(self.project._get_current_experiment(), experiment)

    # pylint: disable=protected-access
    def test_pop_experiment_from_stack(self):
        # given
        first_experiment = Munch(internal_id=a_uuid_string())
        second_experiment = Munch(internal_id=a_uuid_string())

        # and
        self.project._push_new_experiment(first_experiment)

        # when
        self.project._push_new_experiment(second_experiment)

        # then
        self.assertEqual(self.project._get_current_experiment(), second_experiment)

        # and
        self.project._remove_stopped_experiment(second_experiment)

        # and
        self.assertEqual(self.project._get_current_experiment(), first_experiment)

    # pylint: disable=protected-access
    def test_empty_stack(self):
        # expect
        with self.assertRaises(NoExperimentContext):
            self.project._get_current_experiment()

    def test_create_experiment_with_relative_upload_sources(self):
        # skip if
        if sys.version_info.major < 3 or (sys.version_info.major == 3
                                          and sys.version_info.minor < 5):
            self.skipTest("not supported in this Python version")

        # given
        os.chdir('tests/neptune')

        # and
        anExperiment = MagicMock()
        self.backend.create_experiment.return_value = anExperiment

        # when
        self.project.create_experiment(upload_source_files=["test_project.*", "../../*.md"])

        # then
        anExperiment._start.assert_called_once()
        self.assertTrue({
            entry.target_path
            for entry in anExperiment._start.call_args[1]['upload_source_entries']
        } == {"CODE_OF_CONDUCT.md", "README.md", "tests/neptune/test_project.py"})

    def test_create_experiment_with_absolute_upload_sources(self):
        # skip if
        if sys.version_info.major < 3 or (sys.version_info.major == 3
                                          and sys.version_info.minor < 5):
            self.skipTest("not supported in this Python version")

        # given
        os.chdir('tests/neptune')

        # and
        anExperiment = MagicMock()
        self.backend.create_experiment.return_value = anExperiment

        # when
        self.project.create_experiment(
            upload_source_files=[os.path.abspath('test_project.py'), "../../*.md"])

        # then
        anExperiment._start.assert_called_once()
        self.assertTrue({
            entry.target_path
            for entry in anExperiment._start.call_args[1]['upload_source_entries']
        } == {"CODE_OF_CONDUCT.md", "README.md", "tests/neptune/test_project.py"})

    def test_create_experiment_with_upload_single_sources(self):
        # given
        os.chdir('tests/neptune')

        # and
        anExperiment = MagicMock()
        self.backend.create_experiment.return_value = anExperiment

        # when
        self.project.create_experiment(upload_source_files=['test_project.py'])

        # then
        anExperiment._start.assert_called_once()
        self.assertTrue({
            entry.target_path
            for entry in anExperiment._start.call_args[1]['upload_source_entries']
        } == {"test_project.py"})

    def test_create_experiment_with_common_path_below_current_directory(self):
        # given
        anExperiment = MagicMock()
        self.backend.create_experiment.return_value = anExperiment

        # when
        self.project.create_experiment(upload_source_files=['tests/neptune/*.*'])

        # then
        anExperiment._start.assert_called_once()
        self.assertTrue(
            anExperiment._start.call_args[1]['upload_source_entries'][0]
            .target_path.startswith('tests/neptune/'))

    @patch('neptune.projects.glob', new=lambda path: [path.replace('*', 'file.txt')])
    @patch('neptune.projects.os.path', new=ntpath)
    @patch('neptune.internal.storage.storage_utils.os.sep', new=ntpath.sep)
    def test_create_experiment_with_upload_sources_from_multiple_drives_on_windows(self):
        # given
        anExperiment = MagicMock()

        # and
        self.backend.create_experiment.return_value = anExperiment

        # when
        self.project.create_experiment(upload_source_files=['c:\\test1\\*', 'd:\\test2\\*'])

        # then
        anExperiment._start.assert_called_once()
        self.assertTrue({
            entry.target_path
            for entry in anExperiment._start.call_args[1]['upload_source_entries']
        } == {'c:/test1/file.txt', 'd:/test2/file.txt'})
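# Usage note (a sketch, not part of the test file): several tests above chdir
# into tests/neptune and glob for README.md / CODE_OF_CONDUCT.md two levels up,
# so the suite is meant to be launched from the repository root. The dotted
# module path below assumes tests/ and tests/neptune/ are importable packages.
import unittest

suite = unittest.defaultTestLoader.loadTestsFromName("tests.neptune.test_project.TestProject")
unittest.TextTestRunner(verbosity=2).run(suite)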