def __init__(self, args):
    """Dispatch one CLI action against the Qarnot platform.

    NOTE(review): this block was recovered from whitespace-collapsed source;
    the nesting below is reconstructed (the `elif args.command` pairs with
    `if args.uuid`) — confirm against the original file.

    args: parsed command-line namespace; expected attributes include
    `name`, `list`, `uuid`, `stdout`, `retrieve`, `abort`, `command`.
    """
    # Authenticate using the token from the module-level `keys` mapping.
    self.conn = qarnot.Connection(client_token=keys['token'])
    self.args = args
    self.name = args.name
    # print(self.conn.profiles())
    if args.list:
        # List every task known to this connection, then stop.
        for t in self.conn.tasks():
            print(str(t))
        return
    if args.uuid:
        # Operate on an existing task identified by its UUID.
        task = self.conn.retrieve_task(args.uuid)
        if task:
            print(GREEN + "Task found `" + YELLOW + task.uuid + RESET + "`!" + RESET)
        else:
            print(RED + "Error: no task found" + RESET)
            # TODO Exit with status 1
            return
        if args.stdout:
            # Print the task's fresh standard output.
            self.fetch_fresh_output(task)
        if args.retrieve:
            # Download all result files into a per-task output directory.
            dir = 'output-' + args.uuid
            print("\tDownloading results from task in " + YELLOW + dir + RESET + " directory...")
            task.results.get_all_files(dir)
        if args.abort:
            task.abort()
            print("Aborted task `" + YELLOW + task.uuid + RESET + "`")
    elif args.command:
        # No UUID given: create and launch a brand-new task.
        task = self.prepare_task()
        self.prepare_docker(task)
        # if args.directory: self.import_folder(args.directory, task, False)
        self.launch(task)
    else:
        print(RED + "Please specify an action to perform" + RESET)
args = sys.argv[1:] # Parse input files and store them as a list input_files = parser.parse_known_args(args)[0].i # Build the full command line ffmpeg_cmd = ' '.join(args) # Display that we parsed print("** FFMPEG command: %s" % ffmpeg_cmd) print("** Input files: %s" % ', '.join(input_files)) # Edit 'samples.conf' to provide your own credentials # Create a connection, from which all other objects will be derived conn = qarnot.Connection('samples.conf') # Create a task. The 'with' statement ensures that the task will be # deleted in the end, to prevent tasks from continuing to run after # a Ctrl-C for instance task = conn.create_task('sample4-ffmpeg', 'docker-batch', 1) # Store if an error happened during the process error_happened = False try: # Set the command to run when launching the container, by overriding a # constant. # Task constants are the main way of controlling a task's behaviour task.constants['DOCKER_REPO'] = 'jrottenberg/ffmpeg' task.constants['DOCKER_TAG'] = 'ubuntu'
# NOTE(review): this chunk starts mid-function — the enclosing loop over
# tasks (which binds `i`, `t`, and the `finished` list) is not visible here,
# so the indentation below is reconstructed; confirm against the full file.
if not finished[i] and t.wait(0.1):
    # Mark this task done and report overall progress.
    finished[i] = True
    print(
        f'task {t.name} finished with state {t.state}. {finished.count(True)}/{len(finished)}'
    )
    if t.state != 'Success':
        # Abort the whole run as soon as one task fails.
        raise RuntimeError(
            f'task {t.name} failed: {t.errors}.\n' +
            'See https://console.qarnot.com/app/tasks for more info'
        )
# Poll interval between wait rounds.
sleep(1)
pass


if __name__ == '__main__':
    conn = qarnot.Connection('qarnot.conf')

    # Jobs let tasks declare dependencies between one another.
    job = conn.create_job('rom-job', useDependencies=True)
    job.submit()

    # Docker image shared by the pipeline's tasks.
    DOCKER_REPO = 'qarnotlab/pymor_fenics'
    DOCKER_TAG = '2020.2.0_2019.1.0'

    # Sizes of the training / validation sets and of the reduced basis
    # (domain meaning inferred from names — confirm).
    TRAIN_PARAM_NB = 120
    TRAIN_INST = 30
    VAL_PARAM_NB = 50
    VAL_INST = 25
    RB_SIZE = 50

    # Upload the local 'input' directory as the shared input bucket.
    input_bucket = conn.create_bucket('input')
    input_bucket.sync_directory('input')
    # NOTE(review): the chunk ends here; the fom-results bucket is
    # presumably wired to tasks further down in the file.
    fom_res_bucket = conn.create_bucket('fom-results')
def test_connection_with_bad_ssl_return_the_good_exception(self):
    """Connecting to a host with an expired certificate must raise SSLError."""
    bad_ssl_url = "https://expired.badssl.com"
    with pytest.raises(requests.exceptions.SSLError):
        # The assert is never reached: the constructor raises while
        # contacting the cluster over HTTPS.
        assert qarnot.Connection(client_token="token", cluster_url=bad_ssl_url)
#!/usr/bin/env python3 import sys import qarnot import os # Create a connection, from which all other objects will be derived # Enter client token here conn = qarnot.Connection( '/Users/roxanefischer/Desktop/single_path_nas/single-path-nas/qarnot_script/samples.conf' ) import argparse parser = argparse.ArgumentParser() parser.add_argument('-n', '--filename', help=('The filename containing the input data. ' 'If a filename is not given then data is ' 'read from stdin.'), default='nas_search') args = parser.parse_args() # Create a task #task = conn.create_task(args.filename, 'docker-nvidia-network', 1) task = conn.create_task(args.filename, 'docker-batch', 5) # Store if an error happened during the process error_happened = False try: import pdb
def test_connection_with_bad_ssl_and_uncheck_return_JSONDecodeError_exception(
        self):
    """With certificate checks disabled, the handshake succeeds but the
    response body is not JSON, so construction fails with JSONDecodeError."""
    with pytest.raises(simplejson.errors.JSONDecodeError):
        connection = qarnot.Connection(
            client_token="token",
            cluster_url="https://expired.badssl.com",
            cluster_unsafe=True,
        )
        assert connection
def __init__(self, args=None):
    # NOTE(review): dead code — the `if False:` guard means the connection
    # is never created and `self.conn` is never set; confirm whether this is
    # a deliberately disabled stub (e.g. for offline testing) before removing.
    if False:
        self.conn = qarnot.Connection(client_token=keys['token'])
def get_connection(self, mock_get):
    """Return a Connection built against a mocked HTTP GET.

    mock_get: the patched GET callable; its response status is forced to
    200 so the constructor's request (presumably a settings probe —
    confirm) appears to succeed without any real network access.
    """
    mock_get.return_value.status_code = 200
    return qarnot.Connection(
        client_token="token",
        cluster_url="https://localhost",
        storage_url="https://localhost",
    )
#!/usr/bin/env python # Import the Qarnot sdk import qarnot # Connection to the Qarnot platform conn = qarnot.Connection(client_token=' --- Your token here --- ') # Creation of the task task = conn.create_task("Ascendance", "docker-batch", 1) # Creation of an input bucket and synchronization with a local file bucket = conn.create_bucket("input") bucket.sync_directory("UseCaseAscendance") # Creation of an output bucket and synchronization with the task output_bucket = conn.create_bucket("output") task.results = output_bucket task.resources = [bucket] # Docker image and command to be run in he container task.constants["DOCKER_REPO"] = "docker/image" task.constants["DOCKER_TAG"] = "default_latest" task.constants["DOCKER_CMD"] = "/job/run" # Task submission task.submit()
## launch_pytorch.py #!/usr/bin/env python import sys import qarnot import os from config import keys # Edit 'MY_TOKEN' to provide your own credentials # Create a connection, from which all other objects will be derived token = keys['token'] conn = qarnot.Connection(client_token=token) # Create a task task = conn.create_task('pytorch_demo', 'docker-batch', 1) # Store if an error happened during the process error_happened = False try: # Create a resource bucket and add input files input_bucket = conn.create_bucket('pytorch-input') input_bucket.sync_directory('input') input_bucket.add_file('wesh.py') # Attach the bucket to the task task.resources.append(input_bucket) # Create a result bucket and attach it to the task output_bucket = conn.create_bucket('pytorch-output') task.results = output_bucket