def setUp(self):
    """Creates the in-memory db and seeds it with test fixtures."""
    db.create_all()

    # We store some Applications in the db for the tests
    application_1 = Application()
    application_1.name = 'AppName_1'
    application_2 = Application()
    application_2.name = 'AppName_2'

    # Adding execution scripts
    execution_script_1 = ExecutionConfiguration()
    execution_script_1.execution_type = "slurm:sbatch"
    execution_script_2 = ExecutionConfiguration()
    execution_script_2.execution_type = "slurm:sbatch2"
    application_2.execution_configurations = [execution_script_1, execution_script_2]

    db.session.add(application_1)
    db.session.add(application_2)

    # We store some testbeds in the db for the tests
    testbed_1 = Testbed("name_1", True, "slurm", "ssh", "user@server", ['slurm'])
    testbed_2 = Testbed("name_2", False, "slurm", "ssh", "user@server", ['slurm'])
    testbed_3 = Testbed("name_3", True, "slurm", "ssh", "user@server",
                        ['slurm', 'slurm:singularity'])
    db.session.add(testbed_1)
    db.session.add(testbed_2)
    db.session.add(testbed_3)
    db.session.commit()  # commit so the entities above get ids

    deployment = Deployment()
    deployment.executable_id = execution_script_1.id
    deployment.testbed_id = testbed_1.id
    db.session.add(deployment)

    # We store some nodes in the db for the tests
    node_1 = Node()
    node_1.name = "node_1"
    node_1.information_retrieved = True
    node_2 = Node()
    node_2.name = "node_2"
    node_2.information_retrieved = False
    db.session.add(node_1)
    db.session.add(node_2)

    execution = Execution()
    execution.execution_type = "execution_type"
    execution.status = "status"
    db.session.add(execution)
    db.session.commit()
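# A fixture like this is usually paired with a tearDown that resets the
# database between tests; a minimal sketch, assuming the same Flask-SQLAlchemy
# `db` handle (not part of the original listing):
def tearDown(self):
    """Removes the session and drops the in-memory db after each test."""
    db.session.remove()
    db.drop_all()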
def insert_raw_deployment(session, author_name, module_name, tags, occured_at):
    # TODO:greghaynes This could make a lot less round trips to the DB
    author = insert_or_get_author(session, author_name)
    module = insert_or_get_module(session, module_name)
    tags = insert_or_get_tags(session, tags)
    deployment = Deployment(author.id, module.id, occured_at)
    session.add(deployment)
    deployment.tags = tags
    session.commit()
    return deployment
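# The insert_or_get_* helpers are not shown in this listing; a minimal sketch
# of the pattern for one of them, assuming a SQLAlchemy `Author` model with a
# unique `name` column (names are illustrative):
def insert_or_get_author(session, author_name):
    author = session.query(Author).filter_by(name=author_name).first()
    if author is None:
        author = Author(name=author_name)
        session.add(author)
        session.flush()  # populates author.id without committing
    return author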
def create_new_deployment(repo, hash, type):
    # Create a new deployment and an async task to parse it
    deployment = Deployment(repo=repo, hash=hash, type=type,
                            status=DeploymentStatus.SCHEDULED)
    # Save this deployment
    deployment.save()
    # The async task will mark this deployment as STARTED
    from async.celery import process_deploy
    process_deploy.delay(deployment)
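# process_deploy itself is not shown. Celery has to serialize task arguments,
# so a common alternative to passing the model instance is to pass its primary
# key and re-fetch inside the task; a sketch under that assumption (not this
# project's actual task), where the caller would use
# process_deploy.delay(deployment.id):
from celery import shared_task

@shared_task
def process_deploy(deployment_id):
    deployment = Deployment.objects.get(id=deployment_id)
    deployment.status = DeploymentStatus.STARTED  # mark as STARTED, per the comment above
    deployment.save()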
def create(self, request, *args, **kwargs):
    """Generates a db deployment object."""
    d = Deployment()
    s = DeploymentSerializer(d)
    image = request.data['image'].lower()
    image_tag = image
    cookbook = request.data['cookbook']
    user = request.user.username
    recipe = request.data.get('recipe', 'default.rb')
    system = request.data['system'].lower()
    d.cookbook = CookBook.objects.get(name=cookbook, user=user)
    d.recipe = Recipe.objects.get(name=recipe, cookbook=d.cookbook, user=user)
    d.user = str(request.user.username)
    # Detect image: resolve "name:version" to a concrete tag when possible
    if ":" in image:
        image_name, image_version = image.split(":", 1)  # split once; tags may contain ":"
        try:
            i = Image.objects.get(name=image_name.lower(),
                                  version=image_version.lower(),
                                  system=system)
            image_tag = i.tag
        except Image.DoesNotExist:
            pass
        except Image.MultipleObjectsReturned:
            return Response({'detail': 'Multiple images found for [%s]' % image},
                            status=status.HTTP_400_BAD_REQUEST)
    try:
        i = Image.objects.get(tag=image_tag)
    except Image.DoesNotExist:
        return Response({'detail': 'Image not found: [%s]' % image},
                        status=status.HTTP_400_BAD_REQUEST)
    d.image = i
    d.save()
    return Response(s.data, status=status.HTTP_201_CREATED)
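# An illustrative request body for this endpoint (field values are made up;
# `recipe` is optional and falls back to default.rb):
example_payload = {
    "image": "ubuntu:16.04",
    "cookbook": "apache",
    "recipe": "default.rb",
    "system": "docker",
}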
def manage():
    new_deploy = NewDeploymentForm()
    new_deploy.rpi_model.choices = [(key, value) for key, value in PiModels]
    new_deploy.profile_id.choices = [(p.id, p.name) for p in Profile.query.all()]
    new_deploy.collector_type.choices = [(key, value) for key, value in CollectorTypes]

    if request.is_xhr:
        result = {'status': 'error', 'errors': ['invalid action']}
        if new_deploy.validate_on_submit():
            # Generate random keys
            encryption_key = binascii.hexlify(Random.new().read(16))
            chars = string.ascii_letters + string.digits
            instance_key = ''.join(chars[ord(os.urandom(1)) % len(chars)]
                                   for i in range(16))
            mac_key = ''.join(chars[ord(os.urandom(1)) % len(chars)]
                              for i in range(32))
            deployment = Deployment(
                new_deploy.name.data,
                new_deploy.profile_id.data,
                instance_key,
                mac_key,
                encryption_key,
                PiModels.from_string(new_deploy.rpi_model.data),
                new_deploy.server_ip.data,
                new_deploy.interface.data,
                new_deploy.wlan_configuration.data,
                new_deploy.hostname.data,
                new_deploy.rootpw.data,
                new_deploy.debug.data,
                CollectorTypes.from_string(new_deploy.collector_type.data))
            g.db.add(deployment)
            g.db.commit()
            result['status'] = 'success'
            result['id'] = deployment.id
        else:
            errors = []
            for err in new_deploy.errors.values():  # .itervalues() is Python 2 only
                errors.extend(err)
            result['errors'] = errors
        return jsonify(result)

    if sys.platform.startswith("linux"):
        addrs = ni.ifaddresses('eth0')
        new_deploy.server_ip.data = addrs[ni.AF_INET][0]['addr']
        print('Setting default ip: %s' % addrs[ni.AF_INET][0]['addr'])
    else:
        print('Windows platform is currently unsupported; interfaces below '
              'for debug purposes')
        print(ni.interfaces())

    return {
        'deployments': Deployment.query.order_by(Deployment.name.asc()),
        'form': new_deploy
    }
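# Note that ord(os.urandom(1)) % len(chars) is slightly biased, since 256 is
# not a multiple of 62; a sketch of an unbiased alternative using the stdlib
# secrets module (a swapped-in technique, not this project's code):
import secrets
import string

def random_key(length):
    chars = string.ascii_letters + string.digits
    return ''.join(secrets.choice(chars) for _ in range(length))

instance_key = random_key(16)
mac_key = random_key(32)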
def test_patch_execution_script_preprocessor(self, mock_execute_application):
    """Verifies the correct work of the function."""

    # First we verify that nothing happens if launch_execution = False
    data = {'launch_execution': False}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')
    self.assertEquals(200, response.status_code)
    execution_script = response.json
    self.assertEquals("slurm:sbatch", execution_script['execution_type'])

    # If the execution_script has no testbed assigned, we give an error,
    # returning a 409 Conflict on the resource
    data = {'launch_execution': True}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')
    self.assertEquals(409, response.status_code)
    execution_script = response.json
    self.assertEquals('No deployment configured to execute the application',
                      response.json['message'])

    # Now we have an off-line testbed to submit the execution to
    testbed = Testbed("name", False, "slurm", "ssh", "user@server", ['slurm'])
    db.session.add(testbed)
    executable = Executable()
    executable.source_code_file = 'source_code_file'
    executable.compilation_script = 'compilation_script'
    executable.compilation_type = 'compilation_type'
    db.session.add(executable)
    db.session.commit()
    execution_script = db.session.query(ExecutionConfiguration).filter_by(id=1).first()
    execution_script.testbed = testbed
    execution_script.executable = executable
    db.session.commit()

    deployment = Deployment()
    deployment.executable_id = executable.id
    deployment.testbed_id = testbed.id
    db.session.add(deployment)
    db.session.commit()

    data = {'launch_execution': True}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')
    self.assertEquals(403, response.status_code)
    self.assertEquals('Testbed does not allow on-line connection',
                      response.json['message'])

    # Now we are able to launch the execution
    testbed.on_line = True
    db.session.commit()
    data = {'launch_execution': True}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')
    self.assertEquals(200, response.status_code)

    # Now we are able to launch the execution with create_profile=True
    data = {'launch_execution': True,
            'create_profile': True}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')

    # Now we are able to launch the execution with use_storaged_profile=True
    # and create_profile=True
    data = {'launch_execution': True,
            'create_profile': True,
            'use_storaged_profile': True}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')

    # Now we are able to launch the execution with use_storaged_profile=True
    # and an Execution_Configuration without any profile stored on it
    data = {'launch_execution': True,
            'use_storaged_profile': True}
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')

    # Now we are able to launch the execution with use_storaged_profile=True
    # and an Execution_Configuration with a profile stored on it
    data = {'launch_execution': True,
            'use_storaged_profile': True}
    execution_script = db.session.query(ExecutionConfiguration).filter_by(id=1).first()
    execution_script.profile_file = 'pepito.profile'
    db.session.commit()
    response = self.client.patch("/api/v1/execution_configurations/1",
                                 data=json.dumps(data),
                                 content_type='application/json')

    # One expected call to the mocked execute_application per launched
    # execution above
    call_1 = call(execution_script, False, False)
    call_2 = call(execution_script, True, False)
    call_3 = call(execution_script, True, False)
    call_4 = call(execution_script, False, False)
    call_5 = call(execution_script, False, True)
    calls = [call_1, call_2, call_3, call_4, call_5]
    mock_execute_application.assert_has_calls(calls)
def test_execute_application_type_torque_qsub(self, mock_shell, mock_add_nodes):
    """It verifies that an application of type TORQUE:QSUB is executed."""

    # First we verify that the testbed must be of type TORQUE for the
    # execution to proceed; here it should give an error since the
    # category is not torque.

    # We define the different entities necessary for the test.
    testbed = Testbed(
        name="nova2",
        on_line=True,
        category="xxxx",
        protocol="SSH",
        endpoint="*****@*****.**",
        package_formats=['sbatch', 'SINGULARITY'],
        extra_config={
            "enqueue_compss_sc_cfg": "nova.cfg",
            "enqueue_env_file": "/home_nfs/home_ejarquej/installations/rc1707/COMPSs/compssenv"
        })
    db.session.add(testbed)

    application = Application(name="super_app")
    db.session.add(application)
    db.session.commit()  # So application and testbed get an id

    executable = Executable()
    executable.compilation_type = Executable.__type_torque_qsub__
    executable.executable_file = "pepito.sh"
    db.session.add(executable)
    db.session.commit()  # We do this so executable gets an id

    deployment = Deployment()
    deployment.testbed_id = testbed.id
    deployment.executable_id = executable.id
    db.session.add(deployment)  # We add the deployment to the db so it has an id

    execution_config = ExecutionConfiguration()
    execution_config.execution_type = Executable.__type_torque_qsub__
    execution_config.application = application
    execution_config.testbed = testbed
    execution_config.executable = executable
    db.session.add(execution_config)
    db.session.commit()

    execution = Execution()
    execution.execution_type = Executable.__type_torque_qsub__
    execution.status = Execution.__status_submitted__

    torque.execute_batch(execution, execution_config.id)

    self.assertEquals(Execution.__status_failed__, execution.status)
    self.assertEquals("Testbed does not support TORQUE:QSUB applications",
                      execution.output)

    # If the testbed is off-line, execution isn't allowed either
    testbed.category = Testbed.torque_category
    testbed.on_line = False
    db.session.commit()

    execution = Execution()
    execution.execution_type = Executable.__type_torque_qsub__
    execution.status = Execution.__status_submitted__

    torque.execute_batch(execution, execution_config.id)

    self.assertEquals(Executable.__type_torque_qsub__, execution.execution_type)
    self.assertEquals(Execution.__status_failed__, execution.status)
    self.assertEquals("Testbed is off-line", execution.output)

    # Test executing
    output = b'1208.cloudserver'
    mock_shell.return_value = output

    testbed.category = Testbed.torque_category
    testbed.on_line = True
    db.session.commit()

    execution = Execution()
    execution.execution_type = Executable.__type_torque_qsub__
    execution.status = Execution.__status_submitted__

    torque.execute_batch(execution, execution_config.id)

    mock_shell.assert_called_with("qsub", "*****@*****.**", ["pepito.sh"])

    execution = db.session.query(Execution).filter_by(
        execution_configuration_id=execution_config.id).first()
    self.assertEqual(execution.execution_type, execution_config.execution_type)
    self.assertEqual(execution.status, Execution.__status_running__)
    self.assertEqual("1208.cloudserver", execution.batch_id)
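# The (mock_shell, mock_add_nodes) arguments imply two mock.patch decorators on
# this test; a sketch of the wiring with illustrative target paths (decorators
# apply bottom-up, so the lowest one becomes the first mock argument):
@mock.patch('torque.add_nodes_to_execution')
@mock.patch('shell.execute_command')
def test_execute_application_type_torque_qsub(self, mock_shell, mock_add_nodes):
    ...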
def test_execute_srun(self, mock_shell):
    """Verifies the correct work of the function "execute_srun"."""

    # We define the different entities necessary for the test.
    testbed = Testbed(
        name="nova2",
        on_line=True,
        category="SLURM",
        protocol="SSH",
        endpoint="*****@*****.**",
        package_formats=['sbatch', 'SINGULARITY'],
        extra_config={
            "enqueue_compss_sc_cfg": "nova.cfg",
            "enqueue_env_file": "/home_nfs/home_ejarquej/installations/rc1707/COMPSs/compssenv"
        })
    application = Application(name="super_app")

    executable = Executable()
    executable.source_code_file = 'test.zip'
    executable.compilation_script = 'gcc -X'
    executable.compilation_type = "SINGULARITY:SRUN"
    executable.singularity_app_folder = "/singularity/app/folder"
    executable.singularity_image_file = "pepito.img"
    executable.status = "COMPILED"
    executable.application = application

    deployment = Deployment()
    deployment.testbed_id = testbed.id
    deployment.executable_id = executable.id
    deployment.path = "/pepito/pepito.img"

    execution_config = ExecutionConfiguration()
    execution_config.execution_type = "SINGULARITY:SRUN"
    execution_config.application = application
    execution_config.testbed = testbed
    execution_config.executable = executable
    execution_config.num_nodes = 2
    # execution_config.num_gpus_per_node = 2
    execution_config.num_cpus_per_node = 16
    execution_config.exec_time = 10
    execution_config.command = "/apps/application/master/Matmul 2 1024 12.34 /home_nfs/home_ejarquej/demo_test/cpu_gpu_run_data"
    execution_config.compss_config = "--worker_in_master_cpus=12 --worker_in_master_memory=24000 --worker_working_dir=/home_nfs/home_ejarquej --lang=c --monitoring=1000 -d"

    output = b' JOBID PARTITION NAME USER ST TIME NODES NODELIST(REASON)\n 4610 all singular garciad R 0:01 2 ns[55-56]\n'
    mock_shell.return_value = output

    # TEST starts here:
    output_srun = slurm.execute_srun(testbed, execution_config, executable,
                                     deployment, True)

    call_1 = call('(', "*****@*****.**",
                  ["srun", "-N", "2", "-n", "16", "singularity", "run",
                   "/pepito/pepito.img", ">", "allout.txt", "2>&1", "&", ")",
                   ";", "sleep", "1;", "squeue"])

    # Adding a new type of execution (GPU resources instead of a node count)
    execution_config = ExecutionConfiguration()
    execution_config.execution_type = "SINGULARITY:SRUN"
    execution_config.application = application
    execution_config.testbed = testbed
    execution_config.executable = executable
    execution_config.num_gpus_per_node = 2
    execution_config.num_cpus_per_node = 16
    execution_config.exec_time = 10
    execution_config.command = "/apps/application/master/Matmul 2 1024 12.34 /home_nfs/home_ejarquej/demo_test/cpu_gpu_run_data"
    execution_config.compss_config = "--worker_in_master_cpus=12 --worker_in_master_memory=24000 --worker_working_dir=/home_nfs/home_ejarquej --lang=c --monitoring=1000 -d"

    self.assertEquals(output, output_srun)

    output_srun = slurm.execute_srun(testbed, execution_config, executable,
                                     deployment, True)

    call_2 = call('(', "*****@*****.**",
                  ["srun", "--gres=gpu:2", "-n", "16", "singularity", "run",
                   "/pepito/pepito.img", ">", "allout.txt", "2>&1", "&", ")",
                   ";", "sleep", "1;", "squeue"])

    calls = [call_1, call_2]
    mock_shell.assert_has_calls(calls)
    self.assertEquals(output, output_srun)
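# From the two expected calls above, the resource flags map as:
# num_nodes -> "-N <n>", num_gpus_per_node -> "--gres=gpu:<n>",
# num_cpus_per_node -> "-n <n>". A sketch of that assembly (illustrative;
# the real logic lives in slurm.execute_srun):
def build_srun_resource_args(config):
    args = []
    if getattr(config, 'num_gpus_per_node', None):
        args += ['--gres=gpu:%d' % config.num_gpus_per_node]
    elif getattr(config, 'num_nodes', None):
        args += ['-N', str(config.num_nodes)]
    args += ['-n', str(config.num_cpus_per_node)]
    return args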
        'ro': True
    },
    '/opt/techdev/deployr2/deployments/%s/frontend/www' % deployment_name: {
        'bind': '/usr/share/nginx/html',
        'ro': True
    }
})

# Find the host port Docker mapped to the container's port 80
inspect = c.inspect_container('frontend_' + deployment_name)
frontend_port = inspect['NetworkSettings']['Ports']['80/tcp'][0]['HostPort']
# Help!!!

# Create an entity so we know of this deployment later
from django.utils import timezone
deployment = Deployment(id=deployment_name, create_date=timezone.now())
deployment.save()

# Append a ProxyPass directive for the new deployment to the Apache vhost
import augeas
a = augeas.Augeas(root='/')
a.defvar('host', '/files/etc/apache2/sites-available/deployr.conf')
a.defnode('newloc', '$host/VirtualHost/Location[last() + 1]', '')
a.set('$newloc/arg', '/' + deployment_name)
a.set('$newloc/directive', 'ProxyPass')
a.set('$newloc/*[self::directive="ProxyPass"]/arg[1]',
      'http://localhost:' + frontend_port)
a.save()

return HttpResponseRedirect(
    reverse('deployr2:deployment',
            kwargs={'deployment_name': deployment_name}))
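# The opening of the call above is missing from this listing; the
# {'bind': ..., 'ro': ...} mapping matches the binds format of the older
# docker-py client API, e.g. (an assumption, not recovered from the source):
# c.start(container, binds={host_path: {'bind': container_path, 'ro': True}})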