def test_list_json_instances(script_runner, mock_data_dir):
    """Exercise `elyra-metadata list --json`: empty namespace, a populated
    namespace, and a namespace containing invalid instance files.
    """
    mgr = MetadataManager(namespace=METADATA_TEST_NAMESPACE)

    # An empty namespace should report that nothing was found.
    result = script_runner.run('elyra-metadata', 'list',
                               METADATA_TEST_NAMESPACE, '--json')
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 2  # always 2 more than the actual runtime count
    assert output_lines[0].startswith(
        "No metadata instances found for {}".format(METADATA_TEST_NAMESPACE))

    # Populate the namespace with four valid instances.
    valid = Metadata(**valid_metadata_json)
    another = Metadata(**another_metadata_json)
    for instance_name, instance in (('valid', valid), ('valid2', valid),
                                    ('another', another), ('another2', another)):
        assert mgr.create(instance_name, instance) is not None

    result = script_runner.run('elyra-metadata', 'list',
                               METADATA_TEST_NAMESPACE, '--json')
    assert result.success
    assert result.stderr == ''
    # Consume results
    results = json.loads(result.stdout)
    assert len(results) == 4

    # Remove the '2' runtimes and reconfirm smaller set
    mgr.remove('valid2')
    mgr.remove('another2')

    # Include two additional invalid files as well - one for uri failure,
    # another missing display_name
    metadata_dir = os.path.join(mock_data_dir, 'metadata', METADATA_TEST_NAMESPACE)
    create_json_file(metadata_dir, 'invalid.json', invalid_metadata_json)
    create_json_file(metadata_dir, 'no_display_name.json',
                     invalid_no_display_name_json)

    # Default JSON listing includes the invalid instances (2 valid + 2 invalid).
    result = script_runner.run('elyra-metadata', 'list',
                               METADATA_TEST_NAMESPACE, '--json')
    assert result.success
    results = json.loads(result.stdout)
    assert len(results) == 4

    # --valid-only filters the invalid instances out.
    result = script_runner.run('elyra-metadata', 'list',
                               METADATA_TEST_NAMESPACE, '--json', '--valid-only')
    assert result.success
    results = json.loads(result.stdout)
    assert len(results) == 2
def test_list_instances(script_runner, mock_data_dir):
    """Exercise the tabular output of `elyra-metadata list`: empty namespace,
    a populated namespace, then a namespace containing invalid instance files.

    NOTE(review): another function with this exact name appears later in this
    file; under Python semantics the later definition shadows this one, so
    pytest never collects this test. One of the two should be renamed.
    """
    mgr = MetadataManager(namespace=METADATA_TEST_NAMESPACE)

    # An empty namespace should report that nothing was found.
    result = script_runner.run('elyra-metadata', 'list', METADATA_TEST_NAMESPACE)
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 2  # always 2 more than the actual runtime count
    assert output_lines[0].startswith(
        "No metadata instances found for {}".format(METADATA_TEST_NAMESPACE))

    # Populate the namespace with four valid instances.
    valid = Metadata(**valid_metadata_json)
    another = Metadata(**another_metadata_json)
    for instance_name, instance in (('valid', valid), ('valid2', valid),
                                    ('another', another), ('another2', another)):
        assert mgr.create(instance_name, instance) is not None

    result = script_runner.run('elyra-metadata', 'list', METADATA_TEST_NAMESPACE)
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 9  # always 5 more than the actual runtime count
    assert output_lines[0] == \
        "Available metadata instances for {} (includes invalid):".format(
            METADATA_TEST_NAMESPACE)
    # Rows appear name-sorted; each row is "<schema> <instance> ...".
    rows = [line.split() for line in output_lines[4:8]]
    expected = [('metadata-test', 'another'), ('metadata-test', 'another2'),
                ('metadata-test', 'valid'), ('metadata-test', 'valid2')]
    for row, (schema_name, instance_name) in zip(rows, expected):
        assert row[0] == schema_name
        assert row[1] == instance_name
    assert result.stderr == ''

    # Remove the '2' runtimes and reconfirm smaller set
    mgr.remove('valid2')
    mgr.remove('another2')

    # Include two additional invalid files as well - one for uri failure,
    # another missing display_name
    metadata_dir = os.path.join(mock_data_dir, 'metadata', METADATA_TEST_NAMESPACE)
    create_json_file(metadata_dir, 'invalid.json', invalid_metadata_json)
    create_json_file(metadata_dir, 'no_display_name.json',
                     invalid_no_display_name_json)
    create_json_file(metadata_dir, 'invalid_schema_name.json',
                     invalid_schema_name_json)

    # Default listing includes invalid instances, each tagged with its error.
    result = script_runner.run('elyra-metadata', 'list', METADATA_TEST_NAMESPACE)
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 10  # always 5 more than the actual runtime count
    assert output_lines[0] == \
        "Available metadata instances for {} (includes invalid):".format(
            METADATA_TEST_NAMESPACE)
    rows = [line.split() for line in output_lines[4:9]]
    assert rows[0][1] == "another"
    assert rows[1][1] == "invalid"
    assert rows[1][3] == "**INVALID**"
    assert rows[1][4] == "(ValidationError)"
    assert rows[2][3] == "**INVALID**"
    assert rows[2][4] == "(ValidationError)"
    assert rows[3][1] == "valid"
    assert rows[4][3] == "**INVALID**"
    assert rows[4][4] == "(SchemaNotFoundError)"

    # --valid-only filters the invalid instances out of the table.
    result = script_runner.run('elyra-metadata', 'list',
                               METADATA_TEST_NAMESPACE, '--valid-only')
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 7  # always 5 more than the actual runtime count
    assert output_lines[0] == \
        "Available metadata instances for {} (valid only):".format(
            METADATA_TEST_NAMESPACE)
    rows = [line.split() for line in output_lines[4:6]]
    assert rows[0][1] == "another"
    assert rows[1][1] == "valid"
def test_list_instances(script_runner, mock_runtime_dir):
    """Exercise `elyra-metadata list` against the 'elyra-metadata-tests'
    namespace: empty listing, populated listing, and listing with an
    invalid instance file present.

    NOTE(review): this redefines a function of the same name earlier in the
    file, so only this version is collected by pytest; the earlier test is
    silently shadowed. One of the two should be renamed. This variant also
    uses the older `add` API rather than `create` — confirm which is current.
    """
    mgr = MetadataManager(namespace='elyra-metadata-tests')

    # An empty namespace should report that nothing is available.
    result = script_runner.run('elyra-metadata', 'list', 'elyra-metadata-tests')
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 2  # always 2 more than the actual runtime count
    assert output_lines[0].startswith(
        "No metadata instances available for elyra-metadata-tests at:")

    # Populate the namespace with four valid instances.
    valid = Metadata(**valid_metadata_json)
    another = Metadata(**another_metadata_json)
    for instance_name, instance in (('valid', valid), ('valid2', valid),
                                    ('another', another), ('another2', another)):
        assert mgr.add(instance_name, instance) is not None

    result = script_runner.run('elyra-metadata', 'list', 'elyra-metadata-tests')
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 9  # always 5 more than the actual runtime count
    assert output_lines[0] == \
        "Available metadata instances for elyra-metadata-tests (includes invalid):"
    # Rows appear name-sorted; each row is "<schema> <instance> ...".
    rows = [line.split() for line in output_lines[4:8]]
    expected = [('test', 'another'), ('test', 'another2'),
                ('test', 'valid'), ('test', 'valid2')]
    for row, (schema_name, instance_name) in zip(rows, expected):
        assert row[0] == schema_name
        assert row[1] == instance_name
    assert result.stderr == ''

    # Remove the '2' runtimes and reconfirm smaller set
    mgr.remove('valid2')
    mgr.remove('another2')

    # Include an invalid file as well
    metadata_dir = os.path.join(mock_runtime_dir, 'metadata', 'elyra-metadata-tests')
    create_json_file(metadata_dir, 'invalid.json', invalid_metadata_json)

    # Default listing includes the invalid instance, tagged with its error.
    result = script_runner.run('elyra-metadata', 'list', 'elyra-metadata-tests')
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 8  # always 5 more than the actual runtime count
    assert output_lines[0] == \
        "Available metadata instances for elyra-metadata-tests (includes invalid):"
    rows = [line.split() for line in output_lines[4:7]]
    assert rows[0][1] == "another"
    assert rows[1][1] == "invalid"
    assert rows[1][3] == "**INVALID**"
    assert rows[1][4] == "(ValidationError)"
    assert rows[2][1] == "valid"

    # --valid-only filters the invalid instance out of the table.
    result = script_runner.run('elyra-metadata', 'list',
                               'elyra-metadata-tests', '--valid-only')
    assert result.success
    output_lines = result.stdout.split('\n')
    assert len(output_lines) == 7  # always 5 more than the actual runtime count
    assert output_lines[0] == \
        "Available metadata instances for elyra-metadata-tests (valid only):"
    rows = [line.split() for line in output_lines[4:6]]
    assert rows[0][1] == "another"
    assert rows[1][1] == "valid"