def trg_create(self, uid, res_type, res_id, cr):
    ident = (uid, res_type, res_id)
    self.wkf_on_create_cache.setdefault(cr.dbname, {})
    if res_type in self.wkf_on_create_cache[cr.dbname]:
        wkf_ids = self.wkf_on_create_cache[cr.dbname][res_type]
    else:
        cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,))
        wkf_ids = cr.fetchall()
        self.wkf_on_create_cache[cr.dbname][res_type] = wkf_ids
    for (wkf_id,) in wkf_ids:
        instance.create(cr, ident, wkf_id)
def main():
    if error is not None:
        print("recovery mode, imports failed")
        proto_gen()
        return

    manager = Manager(create())

    @manager.command
    def generate():
        """generate base classes/interfaces/handlers from proto-scheme"""
        proto_gen()

    @manager.command
    def docs():
        """generate documentation"""
        base_cwd = os.getcwd()
        RUN_PATH = os.path.dirname(os.path.abspath(__file__))
        DOC_ROOT = os.path.join(RUN_PATH, 'docs')
        os.chdir(DOC_ROOT)
        shutil.rmtree('source/autogenerated', ignore_errors=True)
        subprocess.check_call(['sh', 'build_docs.sh'])
        subprocess.check_call(['sh', 'build_docs.sh'])
        os.chdir(base_cwd)

    @manager.command
    def runserver():
        """run development server"""
        manager.app = initialize()
        manager.app.run()

    manager.run()
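The Manager used above comes from Flask-Script, so each decorated function becomes a CLI command; below is a minimal, self-contained sketch of the same pattern (the manage.py module name and the hello command are illustrative assumptions, not part of the snippet above).

# Minimal Flask-Script sketch (assumes Flask and Flask-Script are installed;
# the module name manage.py and the `hello` command are hypothetical).
from flask import Flask
from flask_script import Manager

app = Flask(__name__)
manager = Manager(app)


@manager.command
def hello():
    """print a greeting"""
    print("hello")


if __name__ == '__main__':
    # invoked as e.g. `python manage.py hello` or `python manage.py runserver`
    manager.run()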
def trg_create(self, uid, res_type, res_id, cr): """ Create a new workflow instance :param res_type: the model name :param res_id: the model instance id to own the created worfklow instance :param cr: a database cursor """ ident = (uid,res_type,res_id) self.wkf_on_create_cache.setdefault(cr.dbname, {}) if res_type in self.wkf_on_create_cache[cr.dbname]: wkf_ids = self.wkf_on_create_cache[cr.dbname][res_type] else: cr.execute('select id from wkf where osv=%s and on_create=True', (res_type,)) wkf_ids = cr.fetchall() self.wkf_on_create_cache[cr.dbname][res_type] = wkf_ids for (wkf_id,) in wkf_ids: instance.create(cr, ident, wkf_id)
def create_rt_vdcm(test_server_instance_id, vdcm_version, ip_address,
                   key_name=None, flavor='t2.small'):
    env = get_rt_environment_details(
        test_server_instance_id=test_server_instance_id)
    ami = get_vdcm_ami(vdcm_version=vdcm_version)
    user_data = VDCM_RT_USER_DATA.format(
        ntp_server=env['server_private_ip_address'])
    eth0 = interface(name='eth0',
                     subnet_id=env['private_subnet_id'],
                     ip_address=ip_address,
                     security_group_id=env['sg_private_id'])
    eth1 = interface(name='eth1',
                     subnet_id=env['video_subnet_id'],
                     security_group_id=env['sg_video_id'])
    if not key_name:
        key_name = env['server_key_name']
    tags = {}
    tags.update(env['server_default_tags'])
    tags.update(env['server_extra_tags'])
    try:
        instance_id = create(
            name='{}-vdcm-{}'.format(
                env['server_default_tags']['Environment Name'], ip_address),
            role='regression-test-vdcm',
            ami=ami,
            flavor=flavor,
            key_name=key_name,
            wait_until_running=True,
            user_data=user_data,
            tags=tags,
            interfaces=[eth0, eth1])
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == 'InvalidIPAddress.InUse':
            raise InstanceExists(ip_address)
        else:
            raise
    return instance_id
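A possible way to call create_rt_vdcm, assuming the surrounding regression-test tooling; the instance id, version string, and IP address are placeholders.

# Hypothetical invocation; all values below are made up for illustration.
try:
    instance_id = create_rt_vdcm(
        test_server_instance_id='i-0123456789abcdef0',  # assumed test server id
        vdcm_version='1.2.3',                           # assumed VDCM version
        ip_address='10.0.1.25')                         # assumed private IP
except InstanceExists:
    # the requested private IP is already taken by another VDCM instance
    pass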
def _execute(cr, workitem, activity, ident, stack):
    result = True
    #
    # send a signal to parent workflow (signal: subflow.signal_name)
    #
    signal_todo = []
    if (workitem['state']=='active') and activity['signal_send']:
        cr.execute("select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)", (workitem['inst_id'],))
        for i in cr.fetchall():
            signal_todo.append((i[0], (ident[0],i[1],i[2]), activity['signal_send']))

    if activity['kind']=='dummy':
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'complete', ident)
            if activity['action_id']:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                if res2:
                    stack.append(res2)
                    result = res2

    elif activity['kind']=='function':
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'running', ident)
            returned_action = wkf_expr.execute(cr, ident, workitem, activity)
            if type(returned_action) in (dict,):
                stack.append(returned_action)
            if activity['action_id']:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                # A client action has been returned
                if res2:
                    stack.append(res2)
                    result = res2
            _state_set(cr, workitem, activity, 'complete', ident)

    elif activity['kind']=='stopall':
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'running', ident)
            cr.execute('delete from wkf_workitem where inst_id=%s and id<>%s', (workitem['inst_id'], workitem['id']))
            if activity['action']:
                wkf_expr.execute(cr, ident, workitem, activity)
            _state_set(cr, workitem, activity, 'complete', ident)

    elif activity['kind']=='subflow':
        if workitem['state']=='active':
            _state_set(cr, workitem, activity, 'running', ident)
            if activity.get('action', False):
                id_new = wkf_expr.execute(cr, ident, workitem, activity)
                if not id_new:
                    cr.execute('delete from wkf_workitem where id=%s', (workitem['id'],))
                    return False
                assert type(id_new)==type(1) or type(id_new)==type(1L), 'Wrong return value: '+str(id_new)+' '+str(type(id_new))
                cr.execute('select id from wkf_instance where res_id=%s and wkf_id=%s', (id_new, activity['subflow_id']))
                id_new = cr.fetchone()[0]
            else:
                id_new = instance.create(cr, ident, activity['subflow_id'])
            cr.execute('update wkf_workitem set subflow_id=%s where id=%s', (id_new, workitem['id']))
            workitem['subflow_id'] = id_new
        if workitem['state']=='running':
            cr.execute("select state from wkf_instance where id=%s", (workitem['subflow_id'],))
            state = cr.fetchone()[0]
            if state=='complete':
                _state_set(cr, workitem, activity, 'complete', ident)

    for t in signal_todo:
        instance.validate(cr, t[0], t[1], t[2], force_running=True)

    return result
def main():
    app = create()
    manager = Manager(app)
    manager.run()
def _execute(cr, workitem, activity, ident, stack):
    """ Execute wkf_activity's action on a given workitem

    @param cr: database handle
    @param workitem: dict of the wkf_workitem to process
    @param activity: dict of the wkf_activity
    @param ident: tuple of (uid, dotted model name, resource id)
    @param stack: ???

    There are 4 different types of activities:
    * dummy: blank state, no processing required
             stack is used to return data from an ir.act.server
             function call if action_id is set
    * function: executes a particular function on a workitem
                note that while the function is executing the system will
                mark the wkf_workitem's state as 'running' to prevent
                doubled up work
    * stopall: deletes the current workitem from the database
    * subflow: creates a new subflow if required
    """
    result = True
    #
    # send a signal to parent workflow (signal: subflow.signal_name)
    #
    signal_todo = []
    if (workitem['state'] == 'active') and activity['signal_send']:
        cr.execute(
            "select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)",
            (workitem['inst_id'], ))
        for i in cr.fetchall():
            signal_todo.append(
                (i[0], (ident[0], i[1], i[2]), activity['signal_send']))

    if config['debug_workflow']:
        exec_started = datetime.datetime.now()
        _logger.debug("  execute {a[kind]} {i[1]},{i[2]} {d}".format(
            i=ident, w=workitem, a=activity, d=exec_started))

    # ACTIVITY: dummy
    if activity['kind'] == 'dummy':
        if workitem['state'] == 'active':
            _state_set(cr, workitem, activity, 'complete', ident)
            if activity['action_id']:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                if res2:
                    stack.append(res2)
                    result = res2

    # ACTIVITY: function
    elif activity['kind'] == 'function':
        if workitem['state'] == 'active':
            if config['debug_workflow']:
                _logger.debug("  function {i[1]},{i[2]}: {a[action]}".format(
                    i=ident, w=workitem, a=activity))
            _state_set(cr, workitem, activity, 'running', ident)
            returned_action = wkf_expr.execute(cr, ident, workitem, activity)
            if type(returned_action) in (dict, ):
                stack.append(returned_action)
            if activity['action_id']:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                # A client action has been returned
                if res2:
                    stack.append(res2)
                    result = res2
            _state_set(cr, workitem, activity, 'complete', ident)

    # ACTIVITY: stopall
    elif activity['kind'] == 'stopall':
        if workitem['state'] == 'active':
            _state_set(cr, workitem, activity, 'running', ident)
            cr.execute('delete from wkf_workitem where inst_id=%s and id<>%s',
                       (workitem['inst_id'], workitem['id']))
            if activity['action']:
                wkf_expr.execute(cr, ident, workitem, activity)
            _state_set(cr, workitem, activity, 'complete', ident)

    # ACTIVITY: subflow
    elif activity['kind'] == 'subflow':
        # If the state is currently marked as 'active' it means that the node
        # has yet to start the subflow. So let's create it.
        if workitem['state'] == 'active':
            _state_set(cr, workitem, activity, 'running', ident)
            # action will be a python function on the wkf_instance's res_type model.
            # What we expect from this function is the new target record's ID.
            # As we expect that the new record will have its own workflow, we just
            # need to know what workflow this object will be launched into. We know
            # the new record's workflow id because the current wkf_activity's
            # wkf_activity.subflow_id references the target sub-object's wkf record.
            if activity.get('action', False):
                id_new = wkf_expr.execute(cr, ident, workitem, activity)
                # If we don't get a new record id, stop the workflow and drop out
                if not id_new:
                    cr.execute('delete from wkf_workitem where id=%s',
                               (workitem['id'], ))
                    return False
                assert type(id_new) == type(1) or type(id_new) == type(1L), \
                    'Wrong return value: ' + str(id_new) + ' ' + str(type(id_new))
                cr.execute(
                    'select id from wkf_instance where res_id=%s and wkf_id=%s',
                    (id_new, activity['subflow_id']))
                id_new = cr.fetchone()[0]
            # If there is no function, we just put the current object on the subflow.
            # TODO: this seems a little perilous if the workflow references some other model
            else:
                id_new = instance.create(cr, ident, activity['subflow_id'])
            cr.execute('update wkf_workitem set subflow_id=%s where id=%s',
                       (id_new, workitem['id']))
            workitem['subflow_id'] = id_new

        # If the state is running, we're waiting for the subflow to complete. Check the
        # subflow to see what status it happens to be in and, if the subflow is marked
        # 'complete', we can finally mark this activity as complete as well.
        if workitem['state'] == 'running':
            cr.execute("select state from wkf_instance where id=%s",
                       (workitem['subflow_id'], ))
            state = cr.fetchone()[0]
            if state == 'complete':
                _state_set(cr, workitem, activity, 'complete', ident)

    for t in signal_todo:
        instance.validate(cr, t[0], t[1], t[2], force_running=True)

    if config['debug_workflow']:
        _logger.debug("  /execute {a[kind]} {i[1]},{i[2]} started {d}".format(
            i=ident, w=workitem, a=activity, d=exec_started))

    return result
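For reference, a sketch of the dict shapes _execute expects; the keys mirror what the function body above reads, while the concrete values are assumptions.

# Illustrative inputs only: keys come from the accesses in _execute() above,
# values are invented for the example.
workitem = {
    'id': 42,             # wkf_workitem id
    'inst_id': 7,         # owning wkf_instance id
    'state': 'active',    # 'active' or 'running'
    'subflow_id': None,   # filled in by the subflow branch
}
activity = {
    'kind': 'function',            # 'dummy' | 'function' | 'stopall' | 'subflow'
    'action': 'action_confirm()',  # expression evaluated by wkf_expr.execute
    'action_id': False,            # optional server action to run
    'signal_send': False,          # optional signal for the parent workflow
    'subflow_id': False,           # wkf id of the subflow when kind == 'subflow'
}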
def main():
    try:
        config.fragment['Resources'] = {}
        config.fragment['Outputs'] = {}
        vpcname = config.templateParameterValues['VpcName']
        vpccidr = {'Ref': 'VpcCidr'}
        vpcasn = config.templateParameterValues['VpcASN']
        onpremname = config.templateParameterValues['OnpremName']
        onpremcidr = {'Ref': 'OnpremCidr'}
        onpremasn = config.templateParameterValues['OnpremASN']
        ami = {'Ref': 'LatestAmiId'}
        traffsize = {'Ref': 'traffsize'}
        vpnsize = {'Ref': 'OnpremVPNsize'}
        vpnflav = config.templateParameterValues['OnpremVPNflav']

        # create VPC
        action = createvpc.main(vpcname, vpccidr, 'No', '')
        config.logger.info('Response: {}'.format(action))
        # create VPC Route Table
        action = createroutetable.main(vpcname, 'Default', 'No')
        config.logger.info('Response: {}'.format(action))
        # create VPC igw
        action = gateway.igw(vpcname, 'No', 'Vpc' + vpcname)
        config.logger.info('Response: {}'.format(action))
        # create VPC default route
        action = route.addv4('PubDefaultIpv4', '0.0.0.0/0', 'RTDefault' + vpcname, 'GatewayId', 'IGW' + vpcname)
        config.logger.info('Response: {}'.format(action))

        # create VPC subnets
        subcidr = {"Fn::Select": [0, {"Fn::Cidr": [vpccidr, 6, 6]}]}
        az = {"Fn::Select": [0, {"Fn::GetAZs": {"Ref": "AWS::Region"}}]}
        action = createsubnet.static(vpcname, 'Subnet1', subcidr, az, 'No', 'Pub', '', 'RTDefault' + vpcname, 'Vpc' + vpcname)
        config.logger.info('Response: {}'.format(action))
        subcidr = {"Fn::Select": [1, {"Fn::Cidr": [vpccidr, 6, 6]}]}
        az = {"Fn::Select": [1, {"Fn::GetAZs": {"Ref": "AWS::Region"}}]}
        action = createsubnet.static(vpcname, 'Subnet2', subcidr, az, 'No', 'Pub', '', 'RTDefault' + vpcname, 'Vpc' + vpcname)
        config.logger.info('Response: {}'.format(action))

        # create OnPrem
        action = createvpc.main(onpremname, onpremcidr, 'No', '')
        config.logger.info('Response: {}'.format(action))
        # create OnPrem Route Table
        action = createroutetable.main(onpremname, 'Default', 'No')
        config.logger.info('Response: {}'.format(action))
        # create OnPrem igw
        action = gateway.igw(onpremname, 'No', 'Vpc' + onpremname)
        config.logger.info('Response: {}'.format(action))
        # create OnPrem default route
        action = route.addv4('PubDefaultIpv4', '0.0.0.0/0', 'RTDefault' + onpremname, 'GatewayId', 'IGW' + onpremname)
        config.logger.info('Response: {}'.format(action))

        # create OnPrem subnets
        subcidr = {"Fn::Select": [0, {"Fn::Cidr": [onpremcidr, 6, 6]}]}
        az = {"Fn::Select": [0, {"Fn::GetAZs": {"Ref": "AWS::Region"}}]}
        action = createsubnet.static(onpremname, 'Subnet1', subcidr, az, 'No', 'Pub', '', 'RTDefault' + onpremname, 'Vpc' + onpremname)
        config.logger.info('Response: {}'.format(action))
        subcidr = {"Fn::Select": [1, {"Fn::Cidr": [onpremcidr, 6, 6]}]}
        az = {"Fn::Select": [1, {"Fn::GetAZs": {"Ref": "AWS::Region"}}]}
        action = createsubnet.static(onpremname, 'Subnet2', subcidr, az, 'No', 'Pub', '', 'RTDefault' + onpremname, 'Vpc' + onpremname)
        config.logger.info('Response: {}'.format(action))

        # allocate EIP OnPrem VPNSRV
        action = gateway.eip(onpremname, 'VPNSRV', 'Vpc' + onpremname)
        config.logger.info('Response: {}'.format(action))

        # create vgw
        action = gateway.vgw('VGW', vpcasn, 'MyVGW', 'ipsec.1', 'Vpc' + vpcname)
        config.logger.info('Response: {}'.format(action))
        mygw = {'Ref': 'VGW'}

        # attach vgw on VPC
        vpcid = {'Ref': 'Vpc' + vpcname}
        action = gateway.vgwattch('VGWATTC' + vpcname, mygw, vpcid)
        config.logger.info('Response: {}'.format(action))

        # create cgw
        peerip = {'Ref': 'EIP' + onpremname + 'VPNSRV'}
        action = gateway.cgw('CGW', onpremasn, peerip, 'ipsec.1', 'EIP' + onpremname + 'VPNSRV')
        config.logger.info('Response: {}'.format(action))
        cgw = {'Ref': 'CGW'}
        vpntype = 'VGW'

        # create default vpn
        dep = ['VGW', 'CGW']
        if onpremasn == '0' and vpcasn == '0':
            action = gateway.vpn('VPN', cgw, 0, mygw, vpntype, dep)
            config.logger.info('Response: {}'.format(action))
        else:
            action = gateway.vpn('VPN', cgw, 1, mygw, vpntype, dep)
            config.logger.info('Response: {}'.format(action))

        # VPN Route adjust on VPC
        if onpremasn == '0' and vpcasn == '0':
            # add static route
            vpnrt = {"Ref": "OnpremCidr"}
            vpnid = {"Ref": "VPN"}
            action = route.vpn('MyGWRoutes', vpnrt, vpnid, 'VPN')
            config.logger.info('Response: {}'.format(action))
        # route propagation from vgw
        rtids = [{'Ref': 'RTDefault' + vpcname}]
        dep = ['VGWATTC' + vpcname, 'RTDefault' + vpcname]
        action = route.prop('MyGWRoutes', rtids, mygw, dep)
        config.logger.info('Response: {}'.format(action))

        # create iam role for deploy vpn
        pol = {
            "PolicyName": "DescribeVPNConn",
            "PolicyDocument": {
                "Version": "2012-10-17",
                "Statement": [{
                    "Effect": "Allow",
                    "Action": ["ec2:DescribeVpnConnections"],
                    "Resource": "*"
                }]
            }
        }
        action = createiamrole.pol('DescribeVPNConn', 'ec2.amazonaws.com', pol, '')
        config.logger.info('Response: {}'.format(action))

        # create instance profile for vpn server
        action = createinstprof.main('InstProfVPNSrv', 'DescribeVPNConn', 'yes')
        config.logger.info('Response: {}'.format(action))

        # create security group for test instances
        action = securitygroup.create(vpcid, 'InstanceTest' + vpcname)
        config.logger.info('Response: {}'.format(action))
        vpcid = {'Ref': 'Vpc' + onpremname}
        action = securitygroup.create(vpcid, 'InstanceTest' + onpremname)
        config.logger.info('Response: {}'.format(action))

        # create security group for vpn server
        action = securitygroup.create(vpcid, 'VPNSrv' + onpremname)
        config.logger.info('Response: {}'.format(action))

        # create rules to access test instances from AWS Office
        with open('zonemap.cfg') as zonefile:
            zonemap = config.json.load(zonefile)
        srcprefix = zonemap['Mappings']['RegionMap'][config.region]['PREFIXLIST']
        action = securitygroup.addingress('SecG' + 'InstanceTest' + vpcname, srcprefix, 'SourcePrefixListId', '-1', '', '', '')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'InstanceTest' + onpremname, srcprefix, 'SourcePrefixListId', '-1', '', '', '')
        config.logger.info('Response: {}'.format(action))

        # create rules to access test instances from each other
        action = securitygroup.addingress('SecG' + 'InstanceTest' + vpcname, onpremcidr, 'CidrIp', '-1', '', '', '')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'InstanceTest' + vpcname, vpccidr, 'CidrIp', '-1', '', '', '')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'InstanceTest' + onpremname, onpremcidr, 'CidrIp', '-1', '', '', '')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'InstanceTest' + onpremname, vpccidr, 'CidrIp', '-1', '', '', '')
        config.logger.info('Response: {}'.format(action))

        # create rules to VPN SRV
        action = securitygroup.addingress('SecG' + 'VPNSrv' + onpremname, '0.0.0.0/0', 'CidrIp', 'icmp', '-1', '-1', 'icmp')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'VPNSrv' + onpremname, '0.0.0.0/0', 'CidrIp', 'udp', '500', '500', 'ike')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'VPNSrv' + onpremname, '0.0.0.0/0', 'CidrIp', 'udp', '4500', '4500', 'IPsec NAT traversal')
        config.logger.info('Response: {}'.format(action))
        action = securitygroup.addingress('SecG' + 'VPNSrv' + onpremname, '0.0.0.0/0', 'CidrIp', '50', '', '', 'ESP')
        config.logger.info('Response: {}'.format(action))

        # create instance test VPC
        vpcintproper = {}
        vpcintproper['DisableApiTermination'] = 'false'
        vpcintproper['InstanceInitiatedShutdownBehavior'] = 'terminate'
        vpcintproper['NetworkInterfaces'] = [{
            'AssociatePublicIpAddress': 'true',
            'DeviceIndex': 0,
            'DeleteOnTermination': 'true',
            'SubnetId': {'Ref': vpcname + 'Subnet1'},
            'GroupSet': [{'Ref': 'SecG' + 'InstanceTest' + vpcname}]
        }]
        vpcintproper['ImageId'] = ami
        vpcintproper['InstanceType'] = traffsize
        vpcintproper['Monitoring'] = 'false'
        userdata = {
            "Fn::Base64": {
                "Fn::Join": ["", [
                    "#!/bin/bash -xe\n",
                    "amazon-linux-extras install -y epel\n",
                    "yum install -y openssl-devel xz xz-devel libffi-devel findutils wireshark tcpdump whois nuttcp iperf3 hping3 nmap sipcalc mtr bind-utils telnet\n",
                    "yum update -y\n",
                    "echo 'Ch@ng£m3' | passwd --stdin ec2-user\n",
                    "echo 'ClientAliveInterval 60' | tee --append /etc/ssh/sshd_config\n",
                    "echo 'ClientAliveCountMax 2' | tee --append /etc/ssh/sshd_config\n",
                    "sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config\n",
                    "systemctl restart sshd.service\n",
                    "reboot\n"
                ]]
            }
        }
        vpcintproper['UserData'] = userdata
        vpcintproper['Tags'] = [{'Key': 'Name', 'Value': 'InstTest' + vpcname}]
        dep = [vpcname + 'Subnet1', 'SecG' + 'InstanceTest' + vpcname]
        action = instance.create('InstTest' + vpcname, vpcintproper, dep)
        config.logger.info('Response: {}'.format(action))

        # create instance test Onprem
        onpremintproper = {}
        onpremintproper['DisableApiTermination'] = 'false'
        onpremintproper['InstanceInitiatedShutdownBehavior'] = 'terminate'
        onpremintproper['NetworkInterfaces'] = [{
            'AssociatePublicIpAddress': 'true',
            'DeviceIndex': 0,
            'DeleteOnTermination': 'true',
            'SubnetId': {'Ref': onpremname + 'Subnet1'},
            'GroupSet': [{'Ref': 'SecG' + 'InstanceTest' + onpremname}]
        }]
        onpremintproper['ImageId'] = ami
        onpremintproper['InstanceType'] = traffsize
        onpremintproper['Monitoring'] = 'false'
        userdata = {
            "Fn::Base64": {
                "Fn::Join": ["", [
                    "#!/bin/bash -xe\n",
                    "amazon-linux-extras install -y epel\n",
                    "yum install -y openssl-devel xz xz-devel libffi-devel findutils wireshark tcpdump whois nuttcp iperf3 hping3 nmap sipcalc mtr bind-utils telnet\n",
                    "yum update -y\n",
                    "echo 'Ch@ng£m3' | passwd --stdin ec2-user\n",
                    "echo 'ClientAliveInterval 60' | tee --append /etc/ssh/sshd_config\n",
                    "echo 'ClientAliveCountMax 2' | tee --append /etc/ssh/sshd_config\n",
                    "sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config\n",
                    "systemctl restart sshd.service\n",
                    "reboot\n"
                ]]
            }
        }
        onpremintproper['UserData'] = userdata
        onpremintproper['Tags'] = [{'Key': 'Name', 'Value': 'InstTest' + onpremname}]
        dep = [onpremname + 'Subnet1', 'SecG' + 'InstanceTest' + onpremname]
        action = instance.create('InstTest' + onpremname, onpremintproper, dep)
        config.logger.info('Response: {}'.format(action))

        # create instance vpn Onprem
        onpremintproper = {}
        onpremintproper['DisableApiTermination'] = 'false'
        onpremintproper['InstanceInitiatedShutdownBehavior'] = 'terminate'
        onpremintproper['SourceDestCheck'] = 'false'
        onpremintproper['IamInstanceProfile'] = {'Ref': 'InstProfVPNSrv'}
        onpremintproper['NetworkInterfaces'] = [{
            'AssociatePublicIpAddress': 'false',
            'DeviceIndex': 0,
            'DeleteOnTermination': 'true',
            'SubnetId': {'Ref': onpremname + 'Subnet1'},
            'GroupSet': [{'Ref': 'SecG' + 'InstanceTest' + onpremname},
                         {'Ref': 'SecG' + 'VPNSrv' + onpremname}]
        }]
        onpremintproper['ImageId'] = ami
        onpremintproper['InstanceType'] = vpnsize
        onpremintproper['Monitoring'] = 'false'
        if onpremasn == '0' and vpcasn == '0':
            userdata = {
                "Fn::Base64": {
                    "Fn::Join": ["", [
                        "#!/bin/bash -xe\n",
                        "amazon-linux-extras install -y epel\n",
                        "yum install -y strongswan python2-boto3 python-xmltodict git\n",
                        "aws configure --profile default set region ", {"Ref": "AWS::Region"}, "\n",
                        "yum update -y\n",
                        "git clone https://github.com/mkilikrates/launchvpn.git\n",
                        "cd launchvpn\n",
                        "./vpn-tunnel.py default ", {"Ref": "VPN"}, " static\n",
                        "sleep 3\n",
                        "export REMOTENET=$(sed 's|/|\\\\/|g' <<< ", {'Ref': 'VpcCidr'}, ")\n",
                        "sed -i \"s/\\ -r a.b.c.d\\/e/\\ -r $REMOTENET/g\" ipsec_conf.txt\n",
                        "cat ipsec_conf.txt >> /etc/strongswan/ipsec.conf\n",
                        "cat ipsec.secrets.txt >> /etc/strongswan/ipsec.secrets\n",
                        "export GATEWAY=$(/sbin/ip route | awk '/default/ { print $3 }')\n",
                        "route add -net ", {"Ref": "OnpremCidr"}, " gw $GATEWAY\n",
                        "echo ", {"Ref": "OnpremCidr"}, " via $GATEWAY >>/etc/sysconfig/network-scripts/route-eth0\n",
                        "cp -f aws-updown.sh /etc/strongswan/ipsec.d/\n",
                        "cp -f heartbeat.sh /etc/strongswan/ipsec.d/\n",
                        "cd ..\n",
                        "rm -rf /launchvpn\n",
                        "systemctl enable strongswan\n",
                        "echo 'Ch@ng£m3' | passwd --stdin ec2-user\n",
                        "echo 'ClientAliveInterval 60' | tee --append /etc/ssh/sshd_config\n",
                        "echo 'ClientAliveCountMax 2' | tee --append /etc/ssh/sshd_config\n",
                        "sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config\n",
                        "systemctl restart sshd.service\n",
                        "reboot\n"
                    ]]
                }
            }
        else:
            userdata = {
                "Fn::Base64": {
                    "Fn::Join": ["", [
                        "#!/bin/bash -xe\n",
                        "amazon-linux-extras install -y epel\n",
                        "yum install -y strongswan quagga python2-boto3 python-xmltodict git\n",
                        "aws configure --profile default set region ", {"Ref": "AWS::Region"}, "\n",
                        "yum update -y\n",
                        "git clone https://github.com/mkilikrates/launchvpn.git\n",
                        "cd launchvpn\n",
                        "./vpn-tunnel.py default ", {"Ref": "VPN"}, " dynamic\n",
                        "sleep 3\n",
                        "sed -i 's/\\ -r a.b.c.d\\/e//g' ipsec_conf.txt\n",
                        "cat ipsec_conf.txt >> /etc/strongswan/ipsec.conf\n",
                        "cat ipsec.secrets.txt >> /etc/strongswan/ipsec.secrets\n",
                        "sed -i '/^router bgp/a \\ network ", {"Ref": "OnpremCidr"}, "' /launchvpn/bgpd.conf.txt\n",
                        "export GATEWAY=$(/sbin/ip route | awk '/default/ { print $3 }')\n",
                        "route add -net ", {"Ref": "OnpremCidr"}, " gw $GATEWAY\n",
                        "echo ", {"Ref": "OnpremCidr"}, " via $GATEWAY >>/etc/sysconfig/network-scripts/route-eth0\n",
                        "cat bgpd.conf.txt >> /etc/quagga/bgpd.conf\n",
                        "cp -f aws-updown.sh /etc/strongswan/ipsec.d/\n",
                        "cp -f heartbeat.sh /etc/strongswan/ipsec.d/\n",
                        "cd ..\n",
                        "rm -rf /launchvpn\n",
                        "systemctl enable strongswan\n",
                        "systemctl enable zebra\n",
                        "systemctl enable bgpd\n",
                        "echo 'Ch@ng£m3' | passwd --stdin ec2-user\n",
                        "echo 'ClientAliveInterval 60' | tee --append /etc/ssh/sshd_config\n",
                        "echo 'ClientAliveCountMax 2' | tee --append /etc/ssh/sshd_config\n",
                        "sed -i 's/^PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config\n",
                        "systemctl restart sshd.service\n",
                        "reboot\n"
                    ]]
                }
            }
        onpremintproper['UserData'] = userdata
        onpremintproper['Tags'] = [{'Key': 'Name', 'Value': 'VPNSRV' + onpremname}]
        dep = [onpremname + 'Subnet1', 'SecG' + 'InstanceTest' + onpremname, 'InstProfVPNSrv']
        action = instance.create('VPNSRV' + onpremname, onpremintproper, dep)
        config.logger.info('Response: {}'.format(action))

        # attach EIP on VPNSRV
        allocid = {'Fn::GetAtt': ['EIP' + onpremname + 'VPNSRV', 'AllocationId']}
        instid = {'Ref': 'VPNSRV' + onpremname}
        dep = ['VPNSRV' + onpremname, 'EIP' + onpremname + 'VPNSRV']
        action = gateway.eipass('VPNSRV', instid, '', '', allocid, dep)
        config.logger.info('Response: {}'.format(action))

        # create route to VPC on Onprem
        action = route.addv4(vpcname, vpccidr, 'RTDefault' + onpremname, 'InstanceId', 'VPNSRV' + onpremname)
        config.logger.info('Response: {}'.format(action))

        action = {}
        action["statusCode"] = "200"
        action["body"] = config.json.dumps('Template Update Success!')
        config.logger.info('Response: {}'.format(action))
        return action
    except Exception as e:
        action = {}
        config.logger.error('ERROR: {}'.format(e))
        config.traceback.print_exc()
        action["statusCode"] = "500"
        action["body"] = str(e)
        config.logger.info('Response: {}'.format(action))
        return action
"""generate base classes/interfaces/handlers from proto-scheme""" proto_gen() @manager.command def docs(): """generate documentation""" base_cwd = os.getcwd() RUN_PATH = os.path.dirname(os.path.abspath(__file__)) DOC_ROOT = os.path.join(RUN_PATH, 'docs') os.chdir(DOC_ROOT) shutil.rmtree('source/autogenerated', ignore_errors=True) subprocess.check_call(['sh', 'build_docs.sh']) subprocess.check_call(['sh', 'build_docs.sh']) os.chdir(base_cwd) @manager.command def runserver(): """run development server""" manager.app = initialize() manager.app.run() manager.run() application = None if __name__ == '__main__': main() elif __name__[0:10] == '_mod_wsgi_': application = create() initialize()
import fs, json
import numpy as np

from instance import create

blueprints = fs.get_blueprints()
generators = map(lambda blueprint: create(blueprint), fs.get_blueprints())


def find_generator_by_url(url=""):
    try:
        return next(generator for generator in generators if generator.url == url)
    except StopIteration:
        return None


def get_web_sockets():
    return filter(lambda g: g.has_web_socket(), generators)


def get_mqtts():
    return filter(lambda g: g.has_mqtt(), generators)


def handle(action, url, select_options=None):
    generator = find_generator_by_url(url)
    if action == 'generator':
        if generator is None:
            return 'no generator match for url!'
        else:
            select_options = generator.options['listener']['select_options']
            limit = select_options['limit']
            samples = generator.simulate(limit, select_options)
            return json.dumps(samples.tolist(), separators=(',', ':'))
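A hedged usage example for handle() above; the '/temperature' URL and the blueprint's select_options are assumptions about how the generators are configured.

# Hypothetical usage: '/temperature' must match some generator.url loaded
# from the blueprints; the result is a compact JSON array of samples.
payload = handle('generator', '/temperature')
if payload == 'no generator match for url!':
    print('unknown URL')
else:
    print(payload)  # e.g. "[21.4,21.7,22.0]", depending on the blueprint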
def _execute(cr, workitem, activity, ident, stack):
    result = True
    #
    # send a signal to parent workflow (signal: subflow.signal_name)
    #
    signal_todo = []
    if (workitem["state"] == "active") and activity["signal_send"]:
        cr.execute(
            "select i.id,w.osv,i.res_id from wkf_instance i left join wkf w on (i.wkf_id=w.id) where i.id IN (select inst_id from wkf_workitem where subflow_id=%s)",
            (workitem["inst_id"],),
        )
        for i in cr.fetchall():
            signal_todo.append((i[0], (ident[0], i[1], i[2]), activity["signal_send"]))

    if activity["kind"] == "dummy":
        if workitem["state"] == "active":
            _state_set(cr, workitem, activity, "complete", ident)
            if activity["action_id"]:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                if res2:
                    stack.append(res2)
                    result = res2

    elif activity["kind"] == "function":
        if workitem["state"] == "active":
            _state_set(cr, workitem, activity, "running", ident)
            returned_action = wkf_expr.execute(cr, ident, workitem, activity)
            if type(returned_action) in (dict,):
                stack.append(returned_action)
            if activity["action_id"]:
                res2 = wkf_expr.execute_action(cr, ident, workitem, activity)
                # A client action has been returned
                if res2:
                    stack.append(res2)
                    result = res2
            _state_set(cr, workitem, activity, "complete", ident)

    elif activity["kind"] == "stopall":
        if workitem["state"] == "active":
            _state_set(cr, workitem, activity, "running", ident)
            cr.execute("delete from wkf_workitem where inst_id=%s and id<>%s", (workitem["inst_id"], workitem["id"]))
            if activity["action"]:
                wkf_expr.execute(cr, ident, workitem, activity)
            _state_set(cr, workitem, activity, "complete", ident)

    elif activity["kind"] == "subflow":
        if workitem["state"] == "active":
            _state_set(cr, workitem, activity, "running", ident)
            if activity.get("action", False):
                id_new = wkf_expr.execute(cr, ident, workitem, activity)
                if not id_new:
                    cr.execute("delete from wkf_workitem where id=%s", (workitem["id"],))
                    return False
                assert type(id_new) == type(1) or type(id_new) == type(1L), (
                    "Wrong return value: " + str(id_new) + " " + str(type(id_new))
                )
                cr.execute(
                    "select id from wkf_instance where res_id=%s and wkf_id=%s", (id_new, activity["subflow_id"])
                )
                id_new = cr.fetchone()[0]
            else:
                id_new = instance.create(cr, ident, activity["subflow_id"])
            cr.execute("update wkf_workitem set subflow_id=%s where id=%s", (id_new, workitem["id"]))
            workitem["subflow_id"] = id_new
        if workitem["state"] == "running":
            cr.execute("select state from wkf_instance where id=%s", (workitem["subflow_id"],))
            state = cr.fetchone()[0]
            if state == "complete":
                _state_set(cr, workitem, activity, "complete", ident)

    for t in signal_todo:
        instance.validate(cr, t[0], t[1], t[2], force_running=True)

    return result