def calls_dataset_restore_archived(dataset_identifier: str, remote_dataset: RemoteDataset):
    """`set_file_status` with status "restore-archived" fetches the named files
    on the remote dataset and restores them from the archive.

    All client/dataset interactions are patched out; the test only verifies the
    calls made by `set_file_status`.
    """
    with patch.object(
        Client, "get_remote_dataset", return_value=remote_dataset
    ) as get_dataset_mock, patch.object(
        RemoteDataset, "fetch_remote_files"
    ) as fetch_files_mock, patch.object(
        RemoteDataset, "restore_archived"
    ) as restore_mock:
        set_file_status(dataset_identifier, "restore-archived", ["one.jpg", "two.jpg"])

        # The dataset is resolved once by identifier ...
        get_dataset_mock.assert_called_once_with(dataset_identifier=dataset_identifier)
        # ... the target files are looked up by filename ...
        fetch_files_mock.assert_called_once_with({"filenames": "one.jpg,two.jpg"})
        # ... and exactly those files are restored.
        restore_mock.assert_called_once_with(fetch_files_mock.return_value)
def run(args, parser):
    """Dispatch a parsed CLI invocation to the matching handler in `f`.

    `args.command` selects the top-level command; for the `dataset` command,
    `args.action` selects the sub-action. `parser` is forwarded to `f.help`
    so usage text can be printed.
    """
    if args.command == "help":
        f.help(parser)

    # Authenticate user
    if args.command == "authenticate":
        # Prompt without echoing the key to the terminal.
        api_key = getpass.getpass(prompt="API key: ", stream=None)
        api_key = api_key.strip()
        if api_key == "":
            print(
                "API Key needed, generate one for your team: https://darwin.v7labs.com/?settings=api-keys"
            )
            return
        f.authenticate(api_key)
        print("Authentication succeeded.")
    # Select / List team
    elif args.command == "team":
        if args.team_name:
            f.set_team(args.team_name)
        elif args.current:
            f.current_team()
        else:
            f.list_teams()
    # Version
    elif args.command == "version":
        # NOTE(review): hard-coded version string — must be kept in sync with
        # the package version by hand; confirm whether a single source of truth exists.
        print("0.5.10")
    elif args.command == "convert":
        f.convert(args.format, args.files, args.output_dir)
    elif args.command == "dataset":
        if args.action == "remote":
            f.list_remote_datasets(args.all, args.team)
        elif args.action == "local":
            f.local(args.team)
        elif args.action == "create":
            f.create_dataset(args.dataset)
        elif args.action == "path":
            # Only print the local path when one is known.
            path = f.path(args.dataset)
            if path:
                print(path)
        # Print the url of a remote project
        elif args.action == "url":
            f.url(args.dataset)
        elif args.action == "push":
            f.upload_data(args.dataset, args.files, args.exclude, args.fps, args.path, args.frames)
        # Remove a project (remotely)
        elif args.action == "remove":
            f.remove_remote_dataset(args.dataset)
        elif args.action == "report":
            # Default report granularity is daily.
            f.dataset_report(args.dataset, args.granularity or "day")
        elif args.action == "export":
            f.export_dataset(args.dataset, args.include_url_token, args.annotation_class, args.name)
        elif args.action == "files":
            f.list_files(args.dataset, args.status, args.path, args.only_filenames)
        elif args.action == "releases":
            f.dataset_list_releases(args.dataset)
        elif args.action == "pull":
            f.pull_dataset(args.dataset, args.only_annotations, args.folders, args.video_frames)
        elif args.action == "import":
            f.dataset_import(args.dataset, args.format, args.files)
        elif args.action == "convert":
            f.dataset_convert(args.dataset, args.format, args.output_dir)
        elif args.action == "migrate":
            f.migrate_dataset(args.dataset)
        elif args.action == "set-file-status":
            f.set_file_status(args.dataset, args.status, args.files)
        elif args.action == "split":
            f.split(args.dataset, args.val_percentage, args.test_percentage, args.seed)
        # No action (or explicit "help") shows the dataset sub-command usage.
        elif args.action == "help" or args.action is None:
            f.help(parser, "dataset")
def raises_if_status_not_supported(dataset_identifier: str):
    """`set_file_status` exits the process with code 1 on an unknown status."""
    with pytest.raises(SystemExit) as exit_info:
        set_file_status(dataset_identifier, "unknown", [])
    assert exit_info.value.code == 1