def terminate(self, logger):
    """Tear down every resource in this configuration.

    Removes the configured nodes first, then the security groups
    (order preserved: nodes are deleted before their groups).

    :param logger: logger handed to the Provisioner for progress output.
    """
    teardown = Provisioner(self.getConnection(), logger)
    nodes = self.config.getNodes()
    teardown.deleteNodes(nodes)
    teardown.deleteSecurityGroups(self.getSecurityGroups())
def provision():
    """HTTP handler: start provisioning from the request's query string.

    Reads 'description' into the environment dict and the remaining
    expected fields into status information, then delegates to a
    fresh Provisioner.
    """
    environment = {'description': request.args['description']}
    status_fields = ('time', 'ip_address', 'customer')
    status_information = {field: request.args[field] for field in status_fields}
    provisioner = Provisioner()
    provisioner.provision(environment, status_information)
    return "POSTED"
def resign(args, deep=True):
    """
    Given a tuple consisting of a path to an uncompressed archive,
    credential directory, and desired output path, resign accordingly.

    The uncompressed archive directory is removed afterwards, whether
    or not re-signing succeeded (see the ``finally`` block).

    Returns a tuple of (cred_dir, path to resigned app)
    """
    ua, cred_dir, resigned_path = args
    try:
        log.debug('resigning with %s %s -> %s', ua.path, cred_dir, resigned_path)
        # Get the credential files, create the 'signer'.
        credential_paths = isign.get_credential_paths(cred_dir)
        signer = CmsSigner(signer=Pkcs1Signer(credential_paths['key']),
                           signer_cert_file=credential_paths['certificate'],
                           apple_cert_file=isign.DEFAULT_APPLE_CERT_PATH)
        # Sign it (in place).
        provisioning_profile = os.path.join(
            cred_dir, isign.DEFAULT_PROVISIONING_PROFILE_FILENAME)
        provisioner = Provisioner([provisioning_profile], [])
        ua.bundle.resign(deep, signer, provisioner)
        # Fix: message previously misspelled as "outputing".
        log.debug("outputting %s", resigned_path)
        # And archive it there.
        ua.archive(resigned_path)
    finally:
        # Always clean up the temporary uncompressed archive, even on error.
        if ua is not None and isdir(ua.path):
            ua.remove()
    return (cred_dir, resigned_path)
def test_provisioner(self):
    """Smoke test: adding a workflow and updating the schedule twice
    must complete without raising.

    The second update_schedule() call exercises re-scheduling over an
    already-populated schedule.
    """
    # Parenthesized print works on both Python 2 and 3 for a single
    # argument; also fixes the 'test_provisoner' typo in the output.
    print('test_provisioner')
    prov = Provisioner()
    prov.add_workflow(self.tutorial_dir_1, None, 10)
    prov.update_schedule()
    print_sched(prov.entries)
    prov.update_schedule()
    print_sched(prov.entries)
    # Reaching this point without an exception is the pass condition
    # (replaces the old dead result-flag pattern).
    self.assertTrue(True)
def resign(input_path, deep=True, apple_cert=DEFAULT_APPLE_CERT_PATH, certificate=DEFAULT_CREDENTIAL_PATHS['certificate'], key=DEFAULT_CREDENTIAL_PATHS['key'], provisioning_profiles=None, output_path=join(os.getcwd(), "out"), signer_class=Pkcs1Signer, signer_arguments=None, info_props=None, entitlements_paths=None):
    """Wrapper around archive.resign().

    Builds the CmsSigner from the given (or default) credentials,
    assembles the Provisioner, and delegates the actual re-signing.
    """
    if signer_arguments is None:
        signer_arguments = {}
    if key is not None:
        signer_arguments['keyfile'] = key
    base_signer = signer_class(**signer_arguments)
    cms_signer = CmsSigner(base_signer,
                           apple_cert_file=apple_cert,
                           signer_cert_file=certificate)
    # Fall back to the bundled defaults when nothing was supplied.
    profiles = ([DEFAULT_PROVISIONING_PROFILE_PATH]
                if provisioning_profiles is None else provisioning_profiles)
    entitlements = [] if entitlements_paths is None else entitlements_paths
    provisioner = Provisioner(profiles, entitlements)
    # sanity check that all provisioning profiles match certificate?
    try:
        return archive.resign(input_path, deep, cms_signer, provisioner,
                              output_path, info_props)
    except exceptions.NotSignable as e:
        # re-raise the exception without exposing internal
        # details of how it happened
        raise NotSignable(e)
def main(vm_limit, config_path, skip_setup, local):
    """Run the provisioning server loop.

    Listens on a local TCP socket for workflow commands, schedules
    submitted workflows on a Provisioner (optionally configured for
    Azure), and on socket timeouts performs periodic VM/job syncing,
    until a stop message arrives or the budget is exhausted.
    """
    azure_config = None
    if config_path:
        azure_config = AzureConfig(config_path)
    # NOTE(review): the +1 presumably reserves a slot for the manager VM
    # (per the inline '#+manager' note) — confirm against Provisioner.
    provisioner = Provisioner(vm_limit+1, azure_config, skip_setup, local) #+manager
    monitor = None
    statistics = Statistics()

    # Socket setup
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('localhost', PORT))
    server_socket.listen(1)
    server_socket.settimeout(TIMEOUT)

    # Wait for connections until receive a stop message
    done = False
    no_budget = False
    while(True):
        # Exit when stop was requested and no log watcher is active, or
        # immediately on budget exhaustion.
        # NOTE(review): precedence is (done and (...)) or no_budget.
        if done and ((monitor != None and monitor.logwatcher.watching_none()) \
                or (monitor == None and provisioner.logwatcher.watching_none())) \
                or no_budget:
            break
        try:
            client_socket, _addr = server_socket.accept()
            msg = receive(client_socket)
            msgs = msg.split(' ')

            # Stop server
            if '--stop' in msgs[0]:
                done = True
            elif len(msgs) == 1:
                # Single token: a workflow directory to monitor only.
                if monitor != None:
                    raise Exception("Only one workflow can be monitored at a time")
                # Parse workflow to monitor
                monitor = Monitor()
                wf_dir = msgs[0]
                monitor.add_workflow(wf_dir)
            else:
                # Three tokens expected: <wf_dir> <prediction_file> <budget>
                if monitor != None:
                    raise Exception("Only one workflow can be monitored at a time")
                provisioner.update_budget_timestamp()
                # Parse and schedule a new Workflow instance
                wf_dir = msgs[0]
                pred = msgs[1]
                budget = msgs[2]
                provisioner.add_workflow(wf_dir, prediction_file=pred, budget=budget)
                try:
                    provisioner.update_schedule()
                except BudgetException:
                    no_budget = True
            client_socket.close()
        except timeout:
            # No client activity within TIMEOUT: periodic maintenance tick.
            if monitor == None and provisioner.workflow.jobs:
                provisioner.update_budget_timestamp()
                # Update and sync vms
                provisioner.allocate_new_vms()
                provisioner.deallocate_vms()
                provisioner.sync_machines()
                # Update, sync jobs, may reschedule
                try:
                    provisioner.update_jobs()
                except BudgetException:
                    no_budget = True
                # Statistics
                provisioner.update_wf_pred()
                statistics.schedshot(provisioner)
                statistics.snapshot(provisioner.timestamp, provisioner.schedule.entries, provisioner.machines)
            elif monitor and monitor.workflow.jobs:
                # Monitoring-only mode: just refresh and record state.
                monitor.update_timestamp()
                monitor.sync_machines()
                monitor.sync_jobs()
                statistics.snapshot(monitor.timestamp, monitor.entries, monitor.machines)
            sys.stdout.flush()

    # Final reporting once the loop exits.
    if monitor == None:
        entries = provisioner.schedule.entries
    else:
        entries = monitor.entries
    statistics.jobs(entries)
    statistics.dump()

    # Best-effort teardown of the experiment's resources; errors here
    # must not mask the run's outcome.
    if provisioner.exp:
        try:
            provisioner.exp.deprovision()
        except Exception:
            pass
    condor_rm_jobs()
def provision(self, logger):
    """Provision this configuration: security groups, then nodes,
    then elastic IPs.

    :param logger: logger handed to the Provisioner for progress output.
    """
    provisioner = Provisioner(self.getConnection(), logger)
    # Create security groups before the nodes that will be placed in them.
    provisioner.provisionSecurityGroups(self.getSecurityGroups())
    provisioner.provisionNodes(self.config.getNodes())
    provisioner.verify(self.config.getNodes())
    # Second provisionSecurityGroups pass with the node list — presumably
    # applies node-dependent rules once the nodes exist; confirm against
    # Provisioner.provisionSecurityGroups.
    provisioner.provisionSecurityGroups(self.getSecurityGroups(), self.config.getNodes())
    provisioner.createElasticIps(self.config.getNodes())
"""Read an environment description (from stdin or a file given with
-e/--environment) and hand it to the Provisioner."""
import sys
import json
import argparse

from provisioner import Provisioner

if not sys.stdin.isatty():
    # Piped input: the whole of stdin is the JSON environment.
    env_obj = json.loads(sys.stdin.read())
else:
    parser = argparse.ArgumentParser(description="")
    parser.add_argument('-e', '--environment',
                        help='read environment from argument')
    args = parser.parse_args()
    if args.environment:
        with open(args.environment, 'rb') as env_file:
            env_obj = json.loads(env_file.read())
        sys.stdout.write(json.dumps(env_obj))  # used for testing
    else:
        # Fix: previously env_obj was left unbound here and the script
        # died later with a NameError; fail fast with a usage error.
        parser.error('no environment: pipe JSON on stdin or pass -e/--environment')

p = Provisioner()
p.provision(env=env_obj)
def main(vm_limit, config_path, skip_setup, local):
    """Run the provisioning server loop.

    Listens on a local TCP socket for workflow commands, schedules
    submitted workflows on a Provisioner (optionally configured for
    Azure), and on socket timeouts performs periodic VM/job syncing,
    until a stop message arrives or the budget is exhausted.
    """
    azure_config = None
    if config_path:
        azure_config = AzureConfig(config_path)
    # NOTE(review): the +1 presumably reserves a slot for the manager VM
    # (per the inline '#+manager' note) — confirm against Provisioner.
    provisioner = Provisioner(vm_limit + 1, azure_config, skip_setup, local)  #+manager
    monitor = None
    statistics = Statistics()

    # Socket setup
    server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_socket.bind(('localhost', PORT))
    server_socket.listen(1)
    server_socket.settimeout(TIMEOUT)

    # Wait for connections until receive a stop message
    done = False
    no_budget = False
    while (True):
        # Exit when stop was requested and no log watcher is active, or
        # immediately on budget exhaustion.
        # NOTE(review): precedence is (done and (...)) or no_budget.
        if done and ((monitor != None and monitor.logwatcher.watching_none()) \
                or (monitor == None and provisioner.logwatcher.watching_none())) \
                or no_budget:
            break
        try:
            client_socket, _addr = server_socket.accept()
            msg = receive(client_socket)
            msgs = msg.split(' ')

            # Stop server
            if '--stop' in msgs[0]:
                done = True
            elif len(msgs) == 1:
                # Single token: a workflow directory to monitor only.
                if monitor != None:
                    raise Exception(
                        "Only one workflow can be monitored at a time")
                # Parse workflow to monitor
                monitor = Monitor()
                wf_dir = msgs[0]
                monitor.add_workflow(wf_dir)
            else:
                # Three tokens expected: <wf_dir> <prediction_file> <budget>
                if monitor != None:
                    raise Exception(
                        "Only one workflow can be monitored at a time")
                provisioner.update_budget_timestamp()
                # Parse and schedule a new Workflow instance
                wf_dir = msgs[0]
                pred = msgs[1]
                budget = msgs[2]
                provisioner.add_workflow(wf_dir, prediction_file=pred, budget=budget)
                try:
                    provisioner.update_schedule()
                except BudgetException:
                    no_budget = True
            client_socket.close()
        except timeout:
            # No client activity within TIMEOUT: periodic maintenance tick.
            if monitor == None and provisioner.workflow.jobs:
                provisioner.update_budget_timestamp()
                # Update and sync vms
                provisioner.allocate_new_vms()
                provisioner.deallocate_vms()
                provisioner.sync_machines()
                # Update, sync jobs, may reschedule
                try:
                    provisioner.update_jobs()
                except BudgetException:
                    no_budget = True
                # Statistics
                provisioner.update_wf_pred()
                statistics.schedshot(provisioner)
                statistics.snapshot(provisioner.timestamp, provisioner.schedule.entries, provisioner.machines)
            elif monitor and monitor.workflow.jobs:
                # Monitoring-only mode: just refresh and record state.
                monitor.update_timestamp()
                monitor.sync_machines()
                monitor.sync_jobs()
                statistics.snapshot(monitor.timestamp, monitor.entries, monitor.machines)
            sys.stdout.flush()

    # Final reporting once the loop exits.
    if monitor == None:
        entries = provisioner.schedule.entries
    else:
        entries = monitor.entries
    statistics.jobs(entries)
    statistics.dump()

    # Best-effort teardown of the experiment's resources; errors here
    # must not mask the run's outcome.
    if provisioner.exp:
        try:
            provisioner.exp.deprovision()
        except Exception:
            pass
    condor_rm_jobs()