def test_no_file_throws_IOError(self):
    """load_stack on a nonexistent stack name must raise IOError.

    assertRaises both checks the exception type and fails with a clear
    message when nothing (or something else) is raised — unlike the
    hand-rolled try/bare-except this replaces, whose fail messages were
    also typo'd ("get her").
    """
    self.assertRaises(IOError, load_stack, "akldsjfalksjf")
def test_package_manager_after_package(self):
    """A `package-managers` section appearing *after* `packages` must
    still be applied when the stack is loaded.

    Fixes: file handle now closed via `with`; the temp file is removed
    in a `finally` so it no longer leaks when the assertion fails.
    """
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-mangled original -- confirm against the loader fixtures.
    input_yaml = """\
packages:
  unix:
    manual-anchor: &manual-anchor
      yum: not-manual-anchor
    mysql:
    mysql-devel:
      brew: mysql
    depends-on-package:
      brew: brew-package
      depends-on:
      - *manual-anchor
      - *mysql
    depends-on-package-2:
      depends-on:
      - *mysql-devel
    dep-on-dep-on-2:
      depends-on:
      - *depends-on-package-2
package-managers:
  unix:
    yum:
"""
    self.maxDiff = None
    path = os.path.abspath(os.path.join(os.environ["TMPDIR"], "stacktest_pm_second.stack"))
    with open(path, "w") as f:
        f.write(input_yaml)
    try:
        self.assertEqual(self.output, load_stack("stacktest_pm_second"))
    finally:
        # remove the temp file even when the assertion fails
        os.remove(path)
def test_include_at_second_level(self):
    """An `inline:` include nested one level down is replaced in place
    by the included file's contents.

    Fixes: the original entered `try` before `f`/`i` were bound, so a
    failure in the first write raised NameError inside `finally`; also
    replaces the deprecated `assertEquals` alias.
    """
    f = i = None
    try:
        f = write_tmp_file("only_include.stack", "first:\n inline: included")
        i = write_tmp_file("included.stack", "blah: not-blah")
        self.assertEqual({"first": {"blah": "not-blah"}}, load_stack("only_include"))
    finally:
        # only clean up files that were actually created
        if f:
            remove_tmp_file(f)
        if i:
            remove_tmp_file(i)
def test_only_include(self):
    """A stack that is nothing but a top-level `inline:` include loads
    as the included file's contents.

    Fixes: the original entered `try` before `f`/`i` were bound, so a
    failure in the first write raised NameError inside `finally`; also
    replaces the deprecated `assertEquals` alias.
    """
    f = i = None
    try:
        f = write_tmp_file("only_include.stack", "inline: included")
        i = write_tmp_file("included.stack", "blah: not-blah")
        self.assertEqual({"blah": "not-blah"}, load_stack("only_include"))
    finally:
        # only clean up files that were actually created
        if f:
            remove_tmp_file(f)
        if i:
            remove_tmp_file(i)
def test_load_from_file(self):
    """load_stack reads the named .stack file and parses its YAML."""
    expected = {"blah": {"not-blah": "hello"}}
    self.assertEqual(expected, load_stack("stacktest"))
def test_multiple_package_managers_and_include(self):
    """Several package-manager types combined with `inline:` includes
    resolve each package to its manager-specific install name.

    Fixes: five temp files were removed in `finally` by name, so any
    early failure left later names unbound and raised NameError; files
    are now collected in a list and only created ones are removed.
    Also replaces the deprecated `assertEquals` alias.
    """
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-mangled original -- confirm against the loader fixtures.
    tmp_files = []
    try:
        tmp_files.append(
            write_tmp_file(
                "parent.stack",
                """\
inline: package-managers
packages:
  unix:
    inline: unix-packages
  python:
    inline: python-packages
  perl:
    inline: perl-packages
""",
            )
        )
        tmp_files.append(
            write_tmp_file(
                "package-managers.stack",
                """\
package-managers:
  unix:
    yum: blah
  perl:
    cpan: blah
  python:
    pip: blah
""",
            )
        )
        tmp_files.append(write_tmp_file("unix-packages.stack", "mysql:\n"))
        tmp_files.append(write_tmp_file("python-packages.stack", "boto:\n"))
        tmp_files.append(write_tmp_file("perl-packages.stack", "perl-pack:\n"))
        self.assertEqual(
            {
                "package-managers": {"unix": {"yum": "blah"}, "perl": {"cpan": "blah"}, "python": {"pip": "blah"}},
                "packages": {
                    "unix": {"mysql": {"yum": "mysql"}},
                    "python": {"boto": {"pip": "boto"}},
                    "perl": {"perl-pack": {"cpan": "perl-pack"}},
                },
            },
            load_stack("parent"),
        )
    finally:
        # remove whichever temp files were actually written
        for tmp in tmp_files:
            remove_tmp_file(tmp)
def test_mutual_depends_ons(self):
    """A package may `depends-on` an anchor defined in an included file.

    Fixes: the original entered `try` before `f`/`i` were bound, so a
    failure in the first write raised NameError inside `finally`; also
    replaces the deprecated `assertEquals` alias.
    """
    # NOTE(review): the YAML indentation below was reconstructed from a
    # whitespace-mangled original -- confirm against the loader fixtures.
    f = i = None
    try:
        f = write_tmp_file(
            "parent.stack",
            """\
packages:
  unix:
    inline: included
    a:
      depends-on:
      - *b
""",
        )
        i = write_tmp_file("included.stack", "b:\n")
        self.assertEqual({"packages": {"unix": {"a": {"depends-on": [None]}, "b": None}}}, load_stack("parent"))
    finally:
        # only clean up files that were actually created
        if f:
            remove_tmp_file(f)
        if i:
            remove_tmp_file(i)
def test_bad_anchor_second_level(self):
    """Loading a stack with a bad anchor at the second level must raise.

    Bug fixed: in the original, `self.fail()` raised AssertionError
    which the bare `except: pass` immediately swallowed -- the test
    could never fail. `assertRaises` cannot be fooled that way.
    """
    self.assertRaises(Exception, load_stack, "bad_yaml_second_level")
def test_automatic_package_anchors(self):
    """Loading the fixture stack produces the expected fixture output."""
    self.maxDiff = None  # show the full dict diff on mismatch
    self.assertEqual(self.output, load_stack("stacktest"))
def convert_to_cloud_init(stack_name, environment, interactive=False):
    """Convert a stack definition into an AWS CloudFormation template.

    Loads the stack *stack_name*, selects *environment*, and assembles a
    CloudFormation template dict: parameters, instance/AMI mappings, IAM
    users + access keys, per-service security groups, and one EC2
    instance (with optional EBS volumes and a cfn-init user-data script)
    per device.  When *interactive* is true the JSON template is printed
    to stdout; otherwise it is returned as a JSON string (so the call
    returns None in interactive mode).

    Python 2 code: relies on `print` statements and `dict.iteritems`.
    """
    # configure django so we can use templates
    # django.conf.settings.configure()
    stack = load_stack(stack_name)
    device_names = stack["environments"][environment]["devices"].keys()
    print device_names
    # NOTE(review): only the FIRST device's service list is used here --
    # presumably every device shares one service list; confirm.
    all_service_names = [s["services"] for s in stack["environments"][environment]["devices"].values()][0]
    print all_service_names
    firewall_rules = stack["firewall"]
    print firewall_rules
    template = {}
    # Add Template version
    template["AWSTemplateFormatVersion"] = "2010-09-09"
    # Add Parameters to the script
    # Start from global parameters, then let each service's parameters
    # override/extend them.
    params = stack["global"].get("parameters", {})
    for s in all_service_names:
        params.update(stack["services"][s].get("parameters", {}))
    # convert keys name
    # Rewrite parameter keys/values into their CloudFormation spellings
    # via the convert_keys / convert_values lookup tables (defined
    # elsewhere in this module).
    for k, v in params.iteritems():
        # v.keys() makes a copy in Python 2, so deleting below is safe
        for to_change in v.keys():
            if params[k][to_change] in convert_values["parameters"]:
                params[k][to_change] = convert_values["parameters"][params[k][to_change]]
            if to_change in convert_keys["parameters"]:
                params[k][convert_keys["parameters"][to_change]] = params[k][to_change]
                del (params[k][to_change])
    template["Parameters"] = params
    # Add mappings to script
    # First the Instance Type to Arch
    mappings = {}
    instance2Arch = stack["instance-types"]
    for k, v in instance2Arch.iteritems():
        instance2Arch[k] = {"Arch": str(v)}
    mappings["AWSInstanceType2Arch"] = instance2Arch
    # Second Region / Type to AMI
    regionArch2Ami = stack["ami-mappings"]
    for region_name, region_values in regionArch2Ami.iteritems():
        # NOTE(review): deleting and re-inserting keys while iterating
        # `iteritems()` over the same dict is fragile in Python 2 -- it
        # can raise RuntimeError or revisit entries.  Appears intended
        # only to coerce keys to plain str; confirm with real mappings.
        for k, v in region_values.iteritems():
            del (regionArch2Ami[region_name][k])
            region_values[str(k)] = v
    mappings["AWSRegionArch2AMI"] = regionArch2Ami
    template["Mappings"] = mappings
    # Add Resources
    # Cloudinit needs a IAM::User to have access to the script
    resources = {}
    resources["CfnUser"] = {
        "Type": "AWS::IAM::User",
        "Properties": {
            "Path": "/",
            "Policies": [
                {
                    "PolicyName": "root",
                    "PolicyDocument": {
                        "Statement": [
                            {"Effect": "Allow", "Action": "cloudformation:DescribeStackResource", "Resource": "*"}
                        ]
                    },
                }
            ],
        },
    }
    # Read-only S3 user so instances can fetch artifacts
    resources["S3User"] = {
        "Type": "AWS::IAM::User",
        "Properties": {
            "Path": "/",
            "Policies": [
                {
                    "PolicyName": "s3get",
                    "PolicyDocument": {
                        "Statement": [{"Effect": "Allow", "Action": ["s3:Get*", "s3:List*"], "Resource": "*"}]
                    },
                }
            ],
        },
    }
    # Access keys for the two IAM users; referenced from user-data below
    resources["HostKeys"] = {"Type": "AWS::IAM::AccessKey", "Properties": {"UserName": {"Ref": "CfnUser"}}}
    resources["S3UserKeys"] = {"Type": "AWS::IAM::AccessKey", "Properties": {"UserName": {"Ref": "S3User"}}}
    # Add Security Groups
    # One security group per service, translating named firewall rules
    # (name -> CIDR via `firewall_rules`) into tcp ingress entries.
    for name in all_service_names:
        service_rules = stack["services"][name].get("firewall", None)
        translated_rules = []
        if service_rules:
            for k, v in service_rules.iteritems():
                try:
                    # best-effort: unknown rule names / non-iterable port
                    # lists are silently skipped
                    for port in v:
                        translated_rules.append((firewall_rules[k], port))
                except:
                    pass
        sg = {
            "Type": "AWS::EC2::SecurityGroup",
            "Properties": {"SecurityGroupIngress": [], "GroupDescription": "Access Rules"},
        }
        for r in translated_rules:
            sg["Properties"]["SecurityGroupIngress"].append(
                {"IpProtocol": "tcp", "FromPort": r[1], "ToPort": r[1], "CidrIp": r[0]}
            )
        resources[stack_key_to_cf_key(name) + "SecurityGroup"] = sg
    # One EC2 instance (plus optional EBS volumes) per device
    for device in device_names:
        resource_name = device.title() + "Server"
        device_size = stack["environments"][environment]["devices"][device]["size"]
        user_data_script = []
        # Volumes
        zone_letter = stack["environments"][environment]["devices"][device].get("zone-letter", None)
        storage = stack["environments"][environment]["devices"][device].get("storage", None)
        instance_volumes = []
        if storage and zone_letter:
            for vnum in range(0, storage["number"]):
                vol_name = resource_name + "Volume" + str(vnum)
                resources[vol_name] = {
                    "Type": "AWS::EC2::Volume",
                    "Properties": {
                        "Size": storage["size"],
                        "AvailabilityZone": {"Fn::Join": ["", [{"Ref": "AWS::Region"}, zone_letter]]},
                    },
                }
                # device letters count up from the configured initial drive
                instance_volumes.append(
                    {"VolumeId": {"Ref": vol_name}, "Device": "/dev/sd" + chr(ord(storage["initial-drive"]) + vnum)}
                )
        # Instances
        # NOTE: user_data_script is embedded here BY REFERENCE and is
        # still appended to below -- the appends are reflected in the
        # template because it is the same list object.
        s = {
            "Type": "AWS::EC2::Instance",
            "Properties": {
                "ImageId": {
                    "Fn::FindInMap": [
                        "AWSRegionArch2AMI",
                        {"Ref": "AWS::Region"},
                        {"Fn::FindInMap": ["AWSInstanceType2Arch", device_size, "Arch"]},
                    ]
                },
                "InstanceType": device_size,
                "AvailabilityZone": {"Fn::Join": ["", [{"Ref": "AWS::Region"}, zone_letter]]},
                "SecurityGroups": [],
                "KeyName": {"Ref": "KeyName"},
                "UserData": {"Fn::Base64": {"Fn::Join": ["", user_data_script]}},
            },
        }
        if instance_volumes:
            s["Properties"]["Volumes"] = instance_volumes
        # cloud-init bootstrap: update cfn tooling, enable EPEL, shim pip
        # in where cfn-init expects easy_install, run cfn-init, and
        # export the S3 credentials for later use.
        user_data_script.append("#!/bin/bash\n")
        user_data_script.append("yum update -y aws-cfn-bootstrap\n")
        user_data_script.append("sed -i 's/^enabled=0/enabled=1/' /etc/yum.repos.d/epel.repo\n")
        user_data_script.append(
            "sed -i 's/easy_install/\/opt\/aws\/bin\/pip_install/' /usr/lib/python2.6/site-packages/cfnbootstrap/lang_package_tools.py\n"
        )
        user_data_script.append("easy_install pip\n")
        user_data_script.append("cat<<EOF> /opt/aws/bin/pip_install\n")
        user_data_script.append("#!/bin/bash\n")
        user_data_script.append("\n")
        user_data_script.append("pip install \$@\n")
        user_data_script.append("EOF\n")
        user_data_script.append("chmod 755 /opt/aws/bin/pip_install\n")
        # cfn-init call -- strings interleaved with CloudFormation
        # intrinsic-function dicts, resolved by the Fn::Join above
        user_data_script.append("/opt/aws/bin/cfn-init -s ")
        user_data_script.append({"Ref": "AWS::StackName"})
        user_data_script.append(" -r %s " % resource_name)
        user_data_script.append(" --access-key ")
        user_data_script.append({"Ref": "HostKeys"})
        user_data_script.append(" --secret-key ")
        user_data_script.append({"Fn::GetAtt": ["HostKeys", "SecretAccessKey"]})
        user_data_script.append(" --region ")
        user_data_script.append({"Ref": "AWS::Region"})
        user_data_script.append(" --verbose 2>&1 \n")
        user_data_script.append("export AWS_ACCESS_KEY_ID=")
        user_data_script.append({"Ref": "S3UserKeys"})
        user_data_script.append("\n")
        user_data_script.append("export AWS_SECRET_ACCESS_KEY=")
        user_data_script.append({"Fn::GetAtt": ["S3UserKeys", "SecretAccessKey"]})
        user_data_script.append("\n")
        # assign security groups to box
        for service in stack["environments"][environment]["devices"][device]["services"]:
            sg_name = stack_key_to_cf_key(service) + "SecurityGroup"
            # NOTE(review): raises KeyError if the service's group was
            # never created above; presumably all device services are in
            # all_service_names -- confirm.
            if resources[sg_name]:
                s["Properties"]["SecurityGroups"].append({"Ref": sg_name})
        # collect packages to be installed
        ci_packages = {}
        for pm_type, mgr in stack["available-package-managers"].iteritems():
            if pm_type in ("unix", "python", "ruby"):
                # managers cloud-init understands natively: emit a
                # cfn-init "packages" section (empty version lists)
                packages = []
                for service in all_service_names:
                    packages.extend(
                        [name[mgr] for name in stack["services"][service]["depends-on"] if name.get(mgr, None)]
                    )
                if packages:
                    ci_packages[stack_mgr_to_ci_mgr(mgr)] = {}
                    for package in set(packages):
                        ci_packages[stack_mgr_to_ci_mgr(mgr)][package] = []
            else:
                # other managers: render one install command per package
                # into the user-data script via a (django) Template
                pkg_template = Template(stack["package-managers"][pm_type][mgr])
                # print [pkg_template.render({'package': name[mgr] }) for name in stack['services'][service]['depends-on'] if name.get(mgr,None) ]
                for service in all_service_names:
                    print service
                    for x in [
                        pkg_template.render(Context({"package": name[mgr]}))
                        for name in stack["services"][service]["depends-on"]
                        if name.get(mgr, None)
                    ]:
                        if x:
                            user_data_script.append(x)
        # Add Files to the devices
        # First Collect all files from services
        files = {}
        for service_name in stack["environments"][environment]["devices"][device]["services"]:
            service = stack["services"][service_name]
            try:
                for fname, fvalue in service["files"].iteritems():
                    # files[service_name+'-'+fname] = fvalue
                    key = fvalue["location"] + "/" + fname
                    files[key] = {}
                    # cfn-init expects octal-style modes like "000644"
                    files[key]["mode"] = "000" + str(fvalue["permissions"])
                    # "owner/group" packed into one field, split here
                    files[key]["owner"] = fvalue["owner"].split("/")[0]
                    files[key]["group"] = fvalue["owner"].split("/")[1]
                    files[key]["content"] = {"Fn::Join": ["\n", fvalue["contents"].split("\n")]}
            except:
                # best-effort: services without a "files" section skipped
                pass
        for service_name in stack["environments"][environment]["devices"][device]["services"]:
            service = stack["services"][service_name]
            try:
                user_data_script.append(service["configuration"])
            except:
                # best-effort: services without "configuration" skipped
                pass
        s["Metadata"] = {"AWS::CloudFormation::Init": {"config": {"packages": ci_packages, "files": files}}}
        resources[resource_name] = s
    template["Resources"] = resources
    if interactive:
        print json.dumps(template, indent=2)
    else:
        return json.dumps(template, indent=2)