def run(self, args):
        # Here is the core of the plugin.
        # After doing your verifications, escape by doing:
        # self.exit(return_code, 'return_message', *performance_data)

        acceskey = args.get('acceskey')
        secretkey = args.get('secretkey')

        if acceskey is not None and secretkey is not None:
            ec2 = boto.connect_ec2(
                aws_access_key_id=acceskey,
                aws_secret_access_key=secretkey
            )
        else:
            ec2 = boto.connect_ec2()

        callers = ec2.get_all_instances(filters={'tag-value': args.get('tag')})
        ips = [r.instances[0].ip_address for r in callers]

        values = []
        for ip in ips:
            try:
                r = requests.get('http://%s:%s%s' % (
                    ip,
                    args.get('port'),
                    args.get('endpoint')
                ))
                value = int(r.json()[args.get('attribute')])
                values.append(value)
            except requests.exceptions.RequestException, e:
                self.exit(STATES.UNKNOWN, "UNKNOWN - %s" % e, [])
Example #2
0
def connect():
    """Open an EC2 connection.

    Uses the module-level AWS credentials when both are set; otherwise
    defers to boto's default credential chain.
    """
    if not (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY):
        return boto.connect_ec2()
    return boto.connect_ec2(aws_access_key_id=AWS_ACCESS_KEY_ID,
                            aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
Example #3
0
def test_context_manager():
    """moto's mock_ec2 as a context manager: real AWS calls fail outside
    the `with` block and are served by the in-memory mock inside it."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    conn.get_all_instances.when.called_with().should.throw(EC2ResponseError)

    with mock_ec2():
        # Inside the mock: no real instances exist yet.
        conn = boto.connect_ec2('the_key', 'the_secret')
        list(conn.get_all_instances()).should.equal([])

    # Mock torn down again -> back to failing against real AWS.
    conn.get_all_instances.when.called_with().should.throw(EC2ResponseError)
def test_context_manager():
    """Same mock_ec2 context-manager scenario, asserting failures with
    assert_raises instead of sure's `.should.throw`.

    NOTE(review): this redefinition shadows the identically named test
    defined just above it at import time.
    """
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError):
        conn.get_all_instances()

    with mock_ec2():
        conn = boto.connect_ec2('the_key', 'the_secret')
        list(conn.get_all_instances()).should.equal([])

    with assert_raises(EC2ResponseError):
        conn.get_all_instances()
Example #5
0
def test_decorator_start_and_stop():
    """mock_ec2 driven manually via start()/stop() rather than as a
    decorator or context manager."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    conn.get_all_instances.when.called_with().should.throw(EC2ResponseError)

    mock = mock_ec2()
    mock.start()
    # While the mock is active, calls succeed against the empty mock state.
    conn = boto.connect_ec2('the_key', 'the_secret')
    list(conn.get_all_instances()).should.equal([])
    mock.stop()

    conn.get_all_instances.when.called_with().should.throw(EC2ResponseError)
Example #6
0
def test_decorator_start_and_stop():
    """Manual mock_ec2 start()/stop(), asserting failures with
    assert_raises.

    NOTE(review): this redefinition shadows the identically named test
    defined just above it at import time.
    """
    conn = boto.connect_ec2('the_key', 'the_secret')
    with assert_raises(EC2ResponseError):
        conn.get_all_instances()

    mock = mock_ec2()
    mock.start()
    conn = boto.connect_ec2('the_key', 'the_secret')
    list(conn.get_all_instances()).should.equal([])
    mock.stop()

    with assert_raises(EC2ResponseError):
        conn.get_all_instances()
Example #7
0
def get_connection(label, key, secret, ec2_url, validation):
    """
    Create EC2 API connection
    @type   label: String
    @param  label: region label

    @type   key: String
    @param  key: AWS access key id

    @type   secret: String
    @param  secret: AWS secret access key

    @type   ec2_url: String
    @param  ec2_url: Ec2 Service URL

    @type   validation: Boolean
    @param  validation: whether to validate SSL certificates

    @rtype : Ec2 Connection
    @return: ec2 connection
    """
    url = urlparse(ec2_url)
    region = RegionInfo(name=label, endpoint=url.hostname)
    # SSL only for https:// endpoints; normalise an empty path to '/'.
    secure = url.scheme == 'https'
    path = url.path if url.path not in ('', '/') else '/'

    # Workaround: Boto 2.6.0 SSL cert validation breaks with Python < 2.7.3.
    # Only validate certificates when requested AND the interpreter is new
    # enough; the tuple comparison replaces the original hand-rolled
    # major/minor/patchlevel check (same truth table) and removes the
    # duplicated connect_ec2 call.
    validate_certs = validation and sys.version_info[:3] >= (2, 7, 3)
    return boto.connect_ec2(aws_access_key_id=key,
                            aws_secret_access_key=secret,
                            is_secure=secure,
                            region=region,
                            port=url.port,
                            path=path,
                            validate_certs=validate_certs)
Example #8
0
def terminate_instance(instance_id):
	"""
	terminate EC2 instance with given ID
	"""
	print "... terminating EC2 instance ..."
	conn = boto.connect_ec2(ec2_key,ec2_secret)
	conn.terminate_instances(instance_ids=[instance_id])
Example #9
0
def test_describe_instance_health():
    """ELB describe_instance_health: empty before any registration, then
    reports every registered instance as InService; a single instance can
    also be queried by id."""
    ec2_conn = boto.connect_ec2()
    reservation = ec2_conn.run_instances('ami-1234abcd', 2)
    instance_id1 = reservation.instances[0].id
    instance_id2 = reservation.instances[1].id

    conn = boto.connect_elb()
    zones = ['us-east-1a', 'us-east-1b']
    ports = [(80, 8080, 'http'), (443, 8443, 'tcp')]
    lb = conn.create_load_balancer('my-lb', zones, ports)

    # No instances registered yet.
    instances_health = conn.describe_instance_health('my-lb')
    instances_health.should.be.empty

    lb.register_instances([instance_id1, instance_id2])

    # Both registered instances must now be reported InService.
    instances_health = conn.describe_instance_health('my-lb')
    instances_health.should.have.length_of(2)
    for instance_health in instances_health:
        instance_health.instance_id.should.be.within([instance_id1, instance_id2])
        instance_health.state.should.equal('InService')

    # Filtering by a single instance id narrows the result to that one.
    instances_health = conn.describe_instance_health('my-lb', [instance_id1])
    instances_health.should.have.length_of(1)
    instances_health[0].instance_id.should.equal(instance_id1)
    instances_health[0].state.should.equal('InService')
Example #10
0
def launch_instance(ec2_ami, ec2_key, ec2_secret,
					ec2_group, ec2_key_pair, ec2_tag):
	
	"""
	launch EC2 instance with given AMI, 
	AWS_access_key and secret_key, 
	the security group want to assign for the new generated VM, 
	the key pair name,
	and the tag. 
	"""
	# NOTE(review): ec2_ami, ec2_key_pair and ec2_tag are unused in the
	# visible body -- this snippet appears truncated before the actual
	# run_instances call.
	print "... creating EC2 instance ..."
	
	# setup connection
	conn = boto.connect_ec2(ec2_key,ec2_secret)
	
	# get security  groups, if the group does not exist, create it
	try:
		group= conn.get_all_security_groups(groupnames=[ec2_group])[0]
		print " Security Groups: %s " % group 
	except conn.ResponseError, e:
		# NOTE(review): EC2's documented error code is
		# 'InvalidGroup.NotFound' (capital F); as written this comparison
		# likely never matches and the error is re-raised -- confirm.
		if e.code == 'InvalidGroup.Notfound':			
			print 'Creating Security Group: %s ' % ec2_group
			group = conn.create_security_group(ec2_group)
		else:
			raise
Example #11
0
def test_autoscaling_group_describe_instances():
    """Autoscaling instances reported by the ASG API must match the EC2
    instances actually launched (ids, count and instance type)."""
    conn = boto.connect_autoscale()
    config = LaunchConfiguration(
        name='tester',
        image_id='ami-abcd1234',
        instance_type='t2.medium',
    )
    conn.create_launch_configuration(config)

    # min_size == max_size == 2 forces exactly two instances at creation.
    group = AutoScalingGroup(
        name='tester_group',
        max_size=2,
        min_size=2,
        launch_config=config,
    )
    conn.create_auto_scaling_group(group)

    instances = list(conn.get_all_autoscaling_instances())
    instances.should.have.length_of(2)
    instances[0].launch_config_name.should.equal('tester')
    autoscale_instance_ids = [instance.instance_id for instance in instances]

    # Cross-check against the EC2 side: same two instances, same type.
    ec2_conn = boto.connect_ec2()
    reservations = ec2_conn.get_all_instances()
    instances = reservations[0].instances
    instances.should.have.length_of(2)
    instance_ids = [instance.id for instance in instances]
    set(autoscale_instance_ids).should.equal(set(instance_ids))
    instances[0].instance_type.should.equal("t2.medium")
Example #12
0
def test_tag_limit_exceeded():
    """Creating more than 50 tags on an instance must fail with
    TagLimitExceeded and leave any previously set tags untouched."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    # 51 tags: one over the EC2 per-resource limit of 50.
    tag_dict = {}
    for i in range(51):
        tag_dict['{0:02d}'.format(i+1)] = ''

    with assert_raises(EC2ResponseError) as cm:
        conn.create_tags(instance.id, tag_dict)
    cm.exception.code.should.equal('TagLimitExceeded')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    # One existing tag plus the 51 new ones still exceeds the limit.
    instance.add_tag("a key", "a value")
    with assert_raises(EC2ResponseError) as cm:
        conn.create_tags(instance.id, tag_dict)
    cm.exception.code.should.equal('TagLimitExceeded')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    # The failed bulk create must not have partially applied: only the
    # single tag set before the failure remains.
    tags = conn.get_all_tags()
    tag = tags[0]
    tags.should.have.length_of(1)
    tag.name.should.equal("a key")
    tag.value.should.equal("a value")
Example #13
0
def test_retrieved_instances_must_contain_their_tags():
    """Tags created via create_tags must appear on the instance object
    returned by a subsequent get_all_instances call."""
    tag_key = 'Tag name'
    tag_value = 'Tag value'
    tags_to_be_set = {tag_key: tag_value}

    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    reservation.should.be.a(Reservation)
    reservation.instances.should.have.length_of(1)
    instance = reservation.instances[0]

    # Sanity-check the round trip before tagging anything.
    reservations = conn.get_all_instances()
    reservations.should.have.length_of(1)
    reservations[0].id.should.equal(reservation.id)
    instances = reservations[0].instances
    instances.should.have.length_of(1)
    instances[0].id.should.equal(instance.id)

    conn.create_tags([instance.id], tags_to_be_set)
    # Re-fetch: the freshly retrieved instance must carry the tag.
    reservations = conn.get_all_instances()
    instance = reservations[0].instances[0]
    retrieved_tags = instance.tags

    # Cleanup of instance
    conn.terminate_instances([instances[0].id])

    # Check whether tag is present with correct value
    retrieved_tags[tag_key].should.equal(tag_value)
Example #14
0
def test_delete_security_group_in_vpc():
    """A VPC security group can be deleted by its group_id."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    group = conn.create_security_group('test1', 'test1', "vpc-12345")

    # Deleting by id must not raise.
    conn.delete_security_group(group_id=group.id)
Example #15
0
File: ec2.py Project: Bob87/geonode
def terminate():
    config = readconfig()
    instance_id = config.get('ec2', 'INSTANCE')
    conn = boto.connect_ec2()
    conn.get_all_instances()
    instance = None
    for reservation in conn.get_all_instances():
        for ins in reservation.instances:
            if ins.id == instance_id:
                instance = ins

    print 'Terminating instance'
    instance.terminate()
    # Give it 10 minutes to terminate
    for i in range(120):
        time.sleep(5)
        instance.update()
        print instance.state
        if instance.state == "terminated":
            config.set('ec2', 'HOST', '')
            config.set('ec2', 'INSTANCE', '')
            configfile = open(CONFIG_FILE, 'wb')
            config.write(configfile)
            configfile.close()
            break
Example #16
0
def test_snapshot_filters():
    """get_all_snapshots must honour each supported filter: description,
    snapshot-id, start-time, volume-id, volume-size, tag-key, tag-value
    and tag:<key>."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    volume1 = conn.create_volume(20, "us-east-1a")
    volume2 = conn.create_volume(25, "us-east-1a")

    # Two snapshots of volume1 (20 GB) and one of volume2 (25 GB).
    snapshot1 = volume1.create_snapshot(description='testsnapshot1')
    snapshot2 = volume1.create_snapshot(description='testsnapshot2')
    snapshot3 = volume2.create_snapshot(description='testsnapshot3')
    
    conn.create_tags([snapshot1.id], {'testkey1': 'testvalue1'})
    conn.create_tags([snapshot2.id], {'testkey2': 'testvalue2'})

    snapshots_by_description = conn.get_all_snapshots(filters={'description': 'testsnapshot1'})
    set([snap.id for snap in snapshots_by_description]).should.equal(set([snapshot1.id]))

    snapshots_by_id = conn.get_all_snapshots(filters={'snapshot-id': snapshot1.id})
    set([snap.id for snap in snapshots_by_id]).should.equal(set([snapshot1.id]))

    snapshots_by_start_time = conn.get_all_snapshots(filters={'start-time': snapshot1.start_time})
    set([snap.start_time for snap in snapshots_by_start_time]).should.equal(set([snapshot1.start_time]))

    # volume-id / volume-size both select the two snapshots of volume1.
    snapshots_by_volume_id = conn.get_all_snapshots(filters={'volume-id': volume1.id})
    set([snap.id for snap in snapshots_by_volume_id]).should.equal(set([snapshot1.id, snapshot2.id]))

    snapshots_by_volume_size = conn.get_all_snapshots(filters={'volume-size': volume1.size})
    set([snap.id for snap in snapshots_by_volume_size]).should.equal(set([snapshot1.id, snapshot2.id]))

    snapshots_by_tag_key = conn.get_all_snapshots(filters={'tag-key': 'testkey1'})
    set([snap.id for snap in snapshots_by_tag_key]).should.equal(set([snapshot1.id]))

    snapshots_by_tag_value = conn.get_all_snapshots(filters={'tag-value': 'testvalue1'})
    set([snap.id for snap in snapshots_by_tag_value]).should.equal(set([snapshot1.id]))

    snapshots_by_tag = conn.get_all_snapshots(filters={'tag:testkey1': 'testvalue1'})
    set([snap.id for snap in snapshots_by_tag]).should.equal(set([snapshot1.id]))
Example #17
0
def test_volume_tag_escaping():
    """Tag values containing XML-special characters must round-trip
    unescaped through the tag API."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    volume = conn.create_volume(10, 'us-east-1a')
    snap = conn.create_snapshot(volume.id, 'Desc')
    snap.add_tags({'key': '</closed>'})

    retrieved = dict(conn.get_all_snapshots()[0].tags)
    retrieved.should.equal({'key': '</closed>'})
def launch(ami,
    instance_type,
    key_name,
    key_extension,
    key_dir,
    group_name,
    ssh_port,
    cidr,
    tag,
    user_data,
    cmd_shell,
    login_user,
    ssh_passwd,
    address):
  """Launch an EC2 instance, first ensuring the named keypair exists.

  NOTE(review): only the keypair check/creation is visible here -- the
  remaining parameters (group_name, ssh_port, cidr, ...) are unused in
  this body, so the snippet appears truncated before the actual launch.
  """

  # NOTE(review): credentials are hard-coded placeholders ("abc").
  ec2=boto.connect_ec2(aws_access_key_id="abc",aws_secret_access_key="abc")
  # Check to see if specified keypair already exists.
  # If we get an InvalidKeyPair.NotFound error back from EC2,
  # it means that it doesn't exist and we need to create it.

  try:
    print 'Check if specified keypair exists\n'
    key = ec2.get_all_key_pairs(keynames=[key_name])[0]

  except ec2.ResponseError, e:
    if e.code == 'InvalidKeyPair.NotFound':
      print 'Creating keypair: %s' % key_name
      key = ec2.create_key_pair(key_name)
      # Persist the private key material to key_dir.
      key.save(key_dir)
    else:
      raise
Example #19
0
def test_create_snapshot():
    """Snapshot lifecycle: create (with and without description), list,
    delete, and double-delete raising InvalidSnapshot.NotFound."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    volume = conn.create_volume(80, "us-east-1a")

    snapshot = volume.create_snapshot('a test snapshot')
    snapshot.update()
    snapshot.status.should.equal('completed')

    snapshots = conn.get_all_snapshots()
    snapshots.should.have.length_of(1)
    snapshots[0].description.should.equal('a test snapshot')
    snapshots[0].start_time.should_not.be.none

    # Create snapshot without description
    snapshot = volume.create_snapshot()
    conn.get_all_snapshots().should.have.length_of(2)

    snapshot.delete()
    conn.get_all_snapshots().should.have.length_of(1)

    # Deleting something that was already deleted should throw an error
    with assert_raises(EC2ResponseError) as cm:
        snapshot.delete()
    cm.exception.code.should.equal('InvalidSnapshot.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none
Example #20
0
 def attach(self):
     """Attach the EBS volume (self.volume_id, possibly resolved from
     self.logical_volume_name) to this instance as self.device, retrying
     while EC2 reports IncorrectState, then wait for the device node to
     appear in the filesystem."""
     ec2 = boto.connect_ec2()
     if self.logical_volume_name:
         # if a logical volume was specified, override the specified volume_id
         # (if there was one) with the current AWS volume for the logical volume:
         logical_volume = next(Volume.find(name = self.logical_volume_name))
         self.volume_id = logical_volume._volume_id
     volume = ec2.get_all_volumes([self.volume_id])[0]
     # wait for the volume to be available. The volume may still be being created
     # from a snapshot.
     while volume.update() != 'available':
         boto.log.info('Volume %s not yet available. Current status = %s.' % (volume.id, volume.status))
         time.sleep(5)
     instance = ec2.get_only_instances([self.instance_id])[0]
     attempt_attach = True
     while attempt_attach:
         try:
             ec2.attach_volume(self.volume_id, self.instance_id, self.device)
             attempt_attach = False
         except EC2ResponseError as e:
             # BUGFIX: the original condition was inverted (`!=`): it raised
             # on IncorrectState and retried on every other error, the exact
             # opposite of its own comment. IncorrectState means EC2 has not
             # yet noticed the instance is running, so wait and retry;
             # anything else is re-raised.
             if e.error_code == 'IncorrectState':
                 boto.log.info('Attempt to attach the EBS volume %s to this instance (%s) returned %s. Trying again in a bit.' % (self.volume_id, self.instance_id, e.errors))
                 time.sleep(2)
             else:
                 raise e
     boto.log.info('Attached volume %s to instance %s as device %s' % (self.volume_id, self.instance_id, self.device))
     # now wait for the volume device to appear
     while not os.path.exists(self.device):
         boto.log.info('%s still does not exist, waiting 2 seconds' % self.device)
         time.sleep(2)
Example #21
0
    def start(self):
        """Stop any currently running instance, then launch a fresh one from
        self.ami_id with a Pyami boot config (credentials + SDB metadata)
        passed as user data; optionally re-associates an elastic IP."""
        self.stop()
        ec2 = boto.connect_ec2()
        ami = ec2.get_all_images(image_ids = [str(self.ami_id)])[0]
        groups = ec2.get_all_security_groups(groupnames=[str(self.security_group)])
        if not self._config:
            self.load_config()
        # Seed the boot config with the connection's own credentials so the
        # launched instance can talk back to AWS.
        if not self._config.has_section("Credentials"):
            self._config.add_section("Credentials")
            self._config.set("Credentials", "aws_access_key_id", ec2.aws_access_key_id)
            self._config.set("Credentials", "aws_secret_access_key", ec2.aws_secret_access_key)

        if not self._config.has_section("Pyami"):
            self._config.add_section("Pyami")

        # Record where the instance should register itself in SimpleDB.
        if self._manager.domain:
            self._config.set('Pyami', 'server_sdb_domain', self._manager.domain.name)
            self._config.set("Pyami", 'server_sdb_name', self.name)

        # Serialise the config to a string; it travels as EC2 user data.
        cfg = StringIO.StringIO()
        self._config.write(cfg)
        cfg = cfg.getvalue()
        r = ami.run(min_count=1,
                    max_count=1,
                    key_name=self.key_name,
                    security_groups = groups,
                    instance_type = self.instance_type,
                    placement = self.zone,
                    user_data = cfg)
        i = r.instances[0]
        self.instance_id = i.id
        # Persist the new instance id before optionally binding the IP.
        self.put()
        if self.elastic_ip:
            ec2.associate_address(self.instance_id, self.elastic_ip)
Example #22
0
def test_remove_tag():
    """remove_tag: honours dry_run (DryRunOperation error), removes by key
    alone, and removes by key+value."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]

    instance.add_tag("a key", "some value")

    tags = conn.get_all_tags()
    tag = tags[0]
    tag.name.should.equal("a key")
    tag.value.should.equal("some value")

    # dry_run must fail with DryRunOperation and change nothing.
    with assert_raises(EC2ResponseError) as ex:
        instance.remove_tag("a key", dry_run=True)
    ex.exception.error_code.should.equal('DryRunOperation')
    ex.exception.status.should.equal(400)
    ex.exception.message.should.equal(
        'An error occurred (DryRunOperation) when calling the DeleteTags operation: Request would have succeeded, but DryRun flag is set')

    # Removal by key only.
    instance.remove_tag("a key")
    conn.get_all_tags().should.have.length_of(0)

    # Removal by key and value.
    instance.add_tag("a key", "some value")
    conn.get_all_tags().should.have.length_of(1)
    instance.remove_tag("a key", "some value")
Example #23
0
 def shutdown(self):
     """Terminate this instance when the job's on_completion policy is
     'shutdown' (the default) and an instance id is known."""
     if self.sd.get('on_completion', 'shutdown') != 'shutdown':
         return
     if not self.instance_id:
         return
     # Let in-flight work settle for a minute before terminating.
     time.sleep(60)
     conn = boto.connect_ec2()
     conn.terminate_instances([self.instance_id])
def launching_ec2_instances(config):
    """Launch one EC2 instance described by *config* (template AMI, key,
    subnet, type, security group, fixed private IP, zone), wait until it
    leaves 'pending', and tag it when running."""
    import boto
    conn = boto.connect_ec2(aws_access_key_id=config["cloud"]["user"],
                            aws_secret_access_key=config["cloud"]["password"],
                            debug=1)

    reservation = conn.run_instances(config["cloud"]["template"],
                                     key_name=config["cloud"]["key"],
                                     subnet_id=config["cloud"]["subnet"],
                                     instance_type=config["cloud"]["instance_type"],
                                     security_group_ids=[config["cloud"]["security_groups"]],
                                     private_ip_address=config["command"]["ip"],
                                     placement=config["cloud"]["zone"])
    instance = reservation.instances[0]
    # Poll every 10s until the instance leaves the 'pending' state.
    status = instance.update()
    while status == 'pending':
        time.sleep(10)
        print('waiting 10s... ')
        status = instance.update()
    if status == 'running':
        print('running adding tag... ')
        # BUGFIX: removed an unused local `import hashlib` and dead
        # commented-out code; random_md5like_hash() is defined elsewhere.
        conn.create_tags([instance.id], {"name": "ScrambleDB" + random_md5like_hash()})
    else:
        print('Instance status: ' + status)

    # NOTE(review): boto Reservation objects are not JSON serializable, so
    # json.dumps(reservation) likely raises TypeError -- confirm with callers.
    return json.dumps(reservation)
def associate_ec2_adresse(config):
    """Associate the configured elastic IP with the instance's primary
    interface, then attach the VIP network interface as device 1.
    Always returns 1."""
    import boto
    cloud = config["cloud"]
    instance_cfg = config["instance"]
    conn = boto.connect_ec2(aws_access_key_id=cloud["user"],
                            aws_secret_access_key=cloud["password"],
                            debug=1)
    conn.associate_address(allocation_id=cloud["elastic_ip_id"],
                           network_interface_id=instance_cfg["interface"],
                           allow_reassociation=True)
    conn.attach_network_interface(network_interface_id=cloud["interface_vip_id"],
                                  instance_id=instance_cfg["id"],
                                  device_index=1)
    return 1
Example #26
0
def create_ami(instance_id, name):
    """
    Create AMI image from specified instance

    The instance needs to be shutdown before the creation begin.

    Returns the id of the newly created image once it is available;
    aborts if image creation fails.
    """
    # Timestamp the image name so repeated builds don't collide.
    image_name = "{0}_{1}".format(name, datetime.now().strftime("%Y%m%d-%H%M"))

    conn = boto.connect_ec2()
    image_id = conn.create_image(instance_id=instance_id, name=image_name)
    # BUGFIX: the original passed image_id into the "for instance" slot of
    # this message; the source instance is instance_id.
    puts("Creating AMI {0} for instance {1}".format(image_name, instance_id))

    # Poll until registration completes (or fails), printing a progress dot.
    while True:
        puts('.', end='')
        sys.stdout.flush()

        image = conn.get_image(image_id)
        if image.state == 'available':
            break
        if image.state == "failed":
            abort("Error creating AMI for {0}".format(image_id))
        time.sleep(5.0)

    puts("\nImage {0} created".format(image_name))
    return image_id
def stop_ec2_instances(config):
    """Stop the configured instance(s) and return a JSON list of
    {'id', 'ip', 'state'} dicts for everything stop_instances reported."""
    import boto
    conn = boto.connect_ec2(aws_access_key_id=config["cloud"]["user"],
                            aws_secret_access_key=config["cloud"]["password"],
                            debug=1)
    res = conn.stop_instances(instance_ids=[config["command"]["group"]])
    # BUGFIX: the accumulator was never initialised in the original, so the
    # first append raised NameError.
    d = []
    for i in res:
        d.append({'id': i.id, 'ip': i.private_ip_address, 'state': i.state})
    return json.dumps(d)
def status_ec2_vip_interface(config):
    import boto
    conn =boto.connect_ec2(aws_access_key_id=config["cloud"]["user"],aws_secret_access_key=config["cloud"]["password"],debug=1)       
    addresses =conn.get_all_network_interfaces()
    d=[]
    for i in addresses:
        print i.id
        print i.status
        attachment_id="na"
        try:
           i.attachment
        except NameError:
           i.attachment=None 
        if i.attachment is None:  
           attachment_id="na"
           instance_id="na"
        else:   
           attachment_id=i.attachment.id   
           instance_id=i.attachment.instance_id
           
        print attachment_id
        print instance_id
        print i.private_ip_address
        d.append({i.id : {'id' : i.id , 'status' : i.status, 'attachment_id' : attachment_id  , 'instance_id' : instance_id, 'ip' : i.private_ip_address }})

        return  json.dumps(d)

         
    return 0    
Example #29
0
def test_get_all_spot_instance_requests_filtering():
    """
    Test that moto correctly filters spot instance requests
    """
    conn = boto.connect_ec2()

    # Three open requests; the first two get tags for filter tests.
    request1 = conn.request_spot_instances(
        price=0.5, image_id='ami-abcd1234',
    )
    request2 = conn.request_spot_instances(
        price=0.5, image_id='ami-abcd1234',
    )
    conn.request_spot_instances(
        price=0.5, image_id='ami-abcd1234',
    )
    request1[0].add_tag('tag1', 'value1')
    request1[0].add_tag('tag2', 'value2')
    request2[0].add_tag('tag1', 'value1')
    request2[0].add_tag('tag2', 'wrong')

    # Nothing is active yet; all three requests are still open.
    requests = conn.get_all_spot_instance_requests(filters={'state': 'active'})
    requests.should.have.length_of(0)

    requests = conn.get_all_spot_instance_requests(filters={'state': 'open'})
    requests.should.have.length_of(3)

    # tag:tag1 matches both tagged requests.
    requests = conn.get_all_spot_instance_requests(
        filters={'tag:tag1': 'value1'})
    requests.should.have.length_of(2)

    # Multiple tag filters must AND together, leaving only request1.
    requests = conn.get_all_spot_instance_requests(
        filters={'tag:tag1': 'value1', 'tag:tag2': 'value2'})
    requests.should.have.length_of(1)
Example #30
0
def stop_instance(instance_id):
	"""
	stop EC2 instance with given ID 
	"""
	print "... stopping EC2 instance ..."
	conn = boto.connect_ec2(ec2_key,ec2_secret)	
	conn.stop_instances(instance_ids=[instance_id])
Example #31
0
    def __init__(self,
                 name,
                 password,
                 instance_type,
                 ami=None,
                 valid_ami_owners=None,
                 valid_ami_location_regex=None,
                 elastic_ip=None,
                 identifier=None,
                 secret_identifier=None,
                 aws_id_file_path=None,
                 user_data=None,
                 keypair_name='latent_buildbot_slave',
                 security_name='latent_buildbot_slave',
                 max_builds=None,
                 notify_on_missing=[],
                 missing_timeout=60 * 20,
                 build_wait_timeout=60 * 10,
                 properties={},
                 locks=None):
        AbstractLatentBuildSlave.__init__(self, name, password, max_builds,
                                          notify_on_missing, missing_timeout,
                                          build_wait_timeout, properties,
                                          locks)
        if not ((ami is not None) ^ (valid_ami_owners is not None
                                     or valid_ami_location_regex is not None)):
            raise ValueError(
                'You must provide either a specific ami, or one or both of '
                'valid_ami_location_regex and valid_ami_owners')
        self.ami = ami
        if valid_ami_owners is not None:
            if isinstance(valid_ami_owners, (int, long)):
                valid_ami_owners = (valid_ami_owners, )
            else:
                for element in valid_ami_owners:
                    if not isinstance(element, (int, long)):
                        raise ValueError(
                            'valid_ami_owners should be int or iterable '
                            'of ints', element)
        if valid_ami_location_regex is not None:
            if not isinstance(valid_ami_location_regex, basestring):
                raise ValueError('valid_ami_location_regex should be a string')
            else:
                # verify that regex will compile
                re.compile(valid_ami_location_regex)
        self.valid_ami_owners = valid_ami_owners
        self.valid_ami_location_regex = valid_ami_location_regex
        self.instance_type = instance_type
        self.keypair_name = keypair_name
        self.security_name = security_name
        self.user_data = user_data
        if identifier is None:
            assert secret_identifier is None, (
                'supply both or neither of identifier, secret_identifier')
            if aws_id_file_path is None:
                home = os.environ['HOME']
                aws_id_file_path = os.path.join(home, '.ec2', 'aws_id')
            if not os.path.exists(aws_id_file_path):
                raise ValueError(
                    "Please supply your AWS access key identifier and secret "
                    "access key identifier either when instantiating this %s "
                    "or in the %s file (on two lines).\n" %
                    (self.__class__.__name__, aws_id_file_path))
            aws_file = open(aws_id_file_path, 'r')
            try:
                identifier = aws_file.readline().strip()
                secret_identifier = aws_file.readline().strip()
            finally:
                aws_file.close()
        else:
            assert aws_id_file_path is None, \
                    'if you supply the identifier and secret_identifier, ' \
                    'do not specify the aws_id_file_path'
            assert secret_identifier is not None, \
                    'supply both or neither of identifier, secret_identifier'
        # Make the EC2 connection.
        self.conn = boto.connect_ec2(identifier, secret_identifier)

        # Make a keypair
        #
        # We currently discard the keypair data because we don't need it.
        # If we do need it in the future, we will always recreate the keypairs
        # because there is no way to
        # programmatically retrieve the private key component, unless we
        # generate it and store it on the filesystem, which is an unnecessary
        # usage requirement.
        try:
            key_pair = self.conn.get_all_key_pairs(keypair_name)[0]
            assert key_pair
            # key_pair.delete() # would be used to recreate
        except boto.exception.EC2ResponseError, e:
            if 'InvalidKeyPair.NotFound' not in e.body:
                if 'AuthFailure' in e.body:
                    print('POSSIBLE CAUSES OF ERROR:\n'
                          '  Did you sign up for EC2?\n'
                          '  Did you put a credit card number in your AWS '
                          'account?\n'
                          'Please doublecheck before reporting a problem.\n')
                raise
            # make one; we would always do this, and stash the result, if we
            # needed the key (for instance, to SSH to the box).  We'd then
            # use paramiko to use the key to connect.
            self.conn.create_key_pair(keypair_name)
Example #32
0
 def connect(self):
     """Return the cached EC2Connection, creating it on first use."""
     if self._connection is not None:
         return self._connection
     self._connection = connect_ec2(**self._credentials)
     return self._connection
Example #33
0
def test_authorize_ip_range_and_revoke():
    """Exercise authorize/revoke of ingress and egress CIDR rules.

    Covers: dry-run behaviour (must raise DryRunOperation), successful
    authorization, revoking a non-matching CIDR (must raise
    InvalidPermission.NotFound), and actual revocation, for both ingress
    rules on an EC2-classic group and egress rules on a VPC group.
    """
    conn = boto.connect_ec2("the_key", "the_secret")
    security_group = conn.create_security_group("test", "test")

    # Dry-run authorize must not change anything and must raise.
    with pytest.raises(EC2ResponseError) as ex:
        success = security_group.authorize(
            ip_protocol="tcp",
            from_port="22",
            to_port="2222",
            cidr_ip="123.123.123.123/32",
            dry_run=True,
        )
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the GrantSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set"
    )

    success = security_group.authorize(ip_protocol="tcp",
                                       from_port="22",
                                       to_port="2222",
                                       cidr_ip="123.123.123.123/32")
    assert success.should.be.true

    security_group = conn.get_all_security_groups(groupnames=["test"])[0]
    int(security_group.rules[0].to_port).should.equal(2222)
    security_group.rules[0].grants[0].cidr_ip.should.equal(
        "123.123.123.123/32")

    # Wrong Cidr should throw error
    with pytest.raises(EC2ResponseError) as cm:
        security_group.revoke(
            ip_protocol="tcp",
            from_port="22",
            to_port="2222",
            cidr_ip="123.123.123.122/32",
        )
    cm.value.code.should.equal("InvalidPermission.NotFound")
    cm.value.status.should.equal(400)
    cm.value.request_id.should_not.be.none

    # Actually revoke — but first confirm dry_run raises without revoking.
    with pytest.raises(EC2ResponseError) as ex:
        security_group.revoke(
            ip_protocol="tcp",
            from_port="22",
            to_port="2222",
            cidr_ip="123.123.123.123/32",
            dry_run=True,
        )
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the RevokeSecurityGroupIngress operation: Request would have succeeded, but DryRun flag is set"
    )

    security_group.revoke(ip_protocol="tcp",
                          from_port="22",
                          to_port="2222",
                          cidr_ip="123.123.123.123/32")

    security_group = conn.get_all_security_groups()[0]
    security_group.rules.should.have.length_of(0)

    # Test for egress as well (egress rules require a VPC security group).
    egress_security_group = conn.create_security_group("testegress",
                                                       "testegress",
                                                       vpc_id="vpc-3432589")

    with pytest.raises(EC2ResponseError) as ex:
        success = conn.authorize_security_group_egress(
            egress_security_group.id,
            "tcp",
            from_port="22",
            to_port="2222",
            cidr_ip="123.123.123.123/32",
            dry_run=True,
        )
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the GrantSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set"
    )

    success = conn.authorize_security_group_egress(
        egress_security_group.id,
        "tcp",
        from_port="22",
        to_port="2222",
        cidr_ip="123.123.123.123/32",
    )
    assert success.should.be.true
    egress_security_group = conn.get_all_security_groups(
        groupnames="testegress")[0]
    # There are two egress rules associated with the security group:
    # the default outbound rule and the new one
    int(egress_security_group.rules_egress[1].to_port).should.equal(2222)
    actual_cidr = egress_security_group.rules_egress[1].grants[0].cidr_ip
    # Deal with Python2 dict->unicode, instead of dict->string.
    # BUGFIX: the original compared ``type(actual_cidr) == "unicode"`` — a
    # type object against a str literal — which is always False, so this
    # normalization branch could never run. Compare the type's *name*.
    if type(actual_cidr).__name__ == "unicode":
        actual_cidr = json.loads(
            actual_cidr.replace("u'", "'").replace("'", '"'))
    actual_cidr.should.equal("123.123.123.123/32")

    # Wrong Cidr should throw error
    egress_security_group.revoke.when.called_with(
        ip_protocol="tcp",
        from_port="22",
        to_port="2222",
        cidr_ip="123.123.123.122/32").should.throw(EC2ResponseError)

    # Actually revoke
    with pytest.raises(EC2ResponseError) as ex:
        conn.revoke_security_group_egress(
            egress_security_group.id,
            "tcp",
            from_port="22",
            to_port="2222",
            cidr_ip="123.123.123.123/32",
            dry_run=True,
        )
    ex.value.error_code.should.equal("DryRunOperation")
    ex.value.status.should.equal(400)
    ex.value.message.should.equal(
        "An error occurred (DryRunOperation) when calling the RevokeSecurityGroupEgress operation: Request would have succeeded, but DryRun flag is set"
    )

    conn.revoke_security_group_egress(
        egress_security_group.id,
        "tcp",
        from_port="22",
        to_port="2222",
        cidr_ip="123.123.123.123/32",
    )

    egress_security_group = conn.get_all_security_groups()[0]
    # There is still the default outbound rule
    egress_security_group.rules_egress.should.have.length_of(1)
Example #34
0
 def connect_method(self, *args, **kwargs):
     """Delegate to ``boto.connect_ec2`` with the given arguments unchanged."""
     return boto.connect_ec2(*args, **kwargs)
Example #35
0
def test_basic_decorator():
    """Under the EC2 mock, a fresh account reports no instances at all."""
    ec2 = boto.connect_ec2('the_key', 'the_secret')
    instances = list(ec2.get_all_instances())
    instances.should.equal([])
def main(argv):
    """Launch an m1.medium instance on the NeCTAR cloud and attach a 40 GB volume.

    Parses ``-a``/``-s`` (EC2 access/secret key) from *argv*, connects to the
    NeCTAR (OpenStack) EC2-compatible endpoint, boots one instance, waits for
    it, then creates and attaches an EBS volume at /dev/vdc.
    """
    def print_help(file=sys.stdout):
        # Usage text; written to stderr on bad options, stdout for -h.
        print('server_deployment.py -a <EC2 Access Key> -s <EC2 Secret Key>',
              file=file)

    ec2_access_key = ""
    ec2_secret_key = ""
    try:
        opts, args = getopt.getopt(argv[1:], "ha:s:",
                                   ["ec2AccessKey=", "ec2SecretKey="])
    except getopt.GetoptError:
        print_help(file=sys.stderr)
        sys.exit(2)

    #print("opts:", opts, "args:", args)
    for opt, arg in opts:
        #print("option:", opt, "arg:", arg)
        if opt == '-h':
            print_help()
            sys.exit()
        elif opt in ("-a", "--ec2AccessKey"):
            ec2_access_key = arg
        elif opt in ("-s", "--ec2SecretKey"):
            ec2_secret_key = arg

    # NeCTAR research cloud exposes an EC2-compatible API on a custom
    # endpoint/port/path, hence the non-AWS connection parameters below.
    region = RegionInfo(name='melbourne', endpoint='nova.rc.nectar.org.au')
    ec2_conn = boto.connect_ec2(aws_access_key_id=ec2_access_key,
                                aws_secret_access_key=ec2_secret_key,
                                is_secure=True,
                                region=region,
                                port=8773,
                                path='/services/Cloud',
                                validate_certs=False)

    #images = ec2_conn.get_all_images()
    #for img in images:
    #	print('Image id: {id}, image name: {name}'.format(id=img.id, name=img.name))

    # reservations = ec2_conn.get_all_reservations()
    # print('Index\tID\t\tInstance')
    # for idx, res in enumerate(reservations):
    # 	print('{idx}\t{res_id}\t{res_inst}'.format(idx=idx, res_id=res.id, res_inst=res.instances))

    # Create instance with default values.

    ### Run for the last instance with a 40 GB volume
    reservation = ec2_conn.run_instances('ami-00003a61',
                                         key_name='team40',
                                         instance_type='m1.medium',
                                         security_groups=['default', 'ssh'],
                                         placement='melbourne-qh2')

    # Block until the instance reaches a usable state (helper defined elsewhere).
    reservations = wait_for_instance(ec2_conn, reservation)

    print(
        '\nID: {r_id}\tStatus: {r_status}\tIP: {r_ip}\tPlacement: {r_placement}'
        .format(r_id=reservations[0].instances[0].id,
                r_status=reservations[0].instances[0].state,
                r_ip=reservations[0].instances[0].private_ip_address,
                r_placement=reservations[0].instances[0].placement))

    # Volume must be created in the same availability zone as the instance.
    vol_req = ec2_conn.create_volume(40, 'melbourne-qh2')

    vol_req = wait_for_volume(ec2_conn, vol_req)
    print('Volume status: {}, volume AZ: {}'.format(vol_req[0].status,
                                                    vol_req[0].zone))
    ec2_conn.attach_volume(vol_req[0].id, reservations[0].instances[0].id,
                           '/dev/vdc')
                        default="edx.org",
                        help="The name of the zone under which to "
                        "create the dns entries.")
    parser.add_argument('-f',
                        '--force',
                        help="Force reuse of an existing name in a zone",
                        action="store_true",
                        default=False)
    parser.add_argument(
        '--aws-id',
        default=None,
        help="read only aws key for fetching instance information"
        "the account you wish add entries for")
    parser.add_argument(
        '--aws-secret',
        default=None,
        help="read only aws id for fetching instance information for"
        "the account you wish add entries for")

    args = parser.parse_args()
    # Connect to ec2 using the provided credentials on the commandline
    ec2_con = boto.connect_ec2(args.aws_id, args.aws_secret)
    elb_con = boto.connect_elb(args.aws_id, args.aws_secret)
    rds_con = boto.connect_rds(args.aws_id, args.aws_secret)

    # Connect to route53 using the user's .boto file
    r53 = boto.connect_route53()

    zone = get_or_create_hosted_zone(args.zone_name)
    update_elb_rds_dns(zone)
Example #38
0
def test_ami_create_from_missing_instance():
    """Creating an image from a nonexistent instance raises EC2ResponseError."""
    conn = boto.connect_ec2('the_key', 'the_secret')
    create_args = ("i-abcdefg", "test-ami", "this is a test ami")
    conn.create_image.when.called_with(*create_args).should.throw(EC2ResponseError)
Example #39
0
def connect(account_name, connection_type, **args):
    """

    Examples of use:
    ec2 = sts_connect.connect(environment, 'ec2', region=region, validate_certs=False)
    ec2 = sts_connect.connect(environment, 'ec2', validate_certs=False, debug=1000)
    ec2 = sts_connect.connect(environment, 'ec2')
    where environment is ( test, prod, dev )
    s3  = sts_connect.connect(environment, 's3')
    ses = sts_connect.connect(environment, 'ses')

    :param account: Account to connect with (i.e. test, prod, dev)

    :raises Exception: RDS Region not valid
                       AWS Tech not supported.

    :returns: STS Connection Object for given tech

    :note: To use this method a SecurityMonkey role must be created
            in the target account with full read only privledges.
    """
    account = Account.query.filter(Account.name == account_name).first()
    sts = boto.connect_sts()
    role = sts.assume_role(
        'arn:aws:iam::' + account.number + ':role/SecurityMonkey', 'secmonkey')

    if connection_type == 'botocore':
        botocore_session = botocore.session.get_session()
        botocore_session.set_credentials(role.credentials.access_key,
                                         role.credentials.secret_key,
                                         token=role.credentials.session_token)
        return botocore_session

    if connection_type == 'ec2':
        return boto.connect_ec2(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'elb':
        if 'region' in args:
            region = args['region']
            del args['region']
        else:
            region = 'us-east-1'

        return boto.ec2.elb.connect_to_region(
            region,
            aws_access_key_id=role.credentials.access_key,
            aws_secret_access_key=role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 's3':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.s3.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_s3(role.credentials.access_key,
                               role.credentials.secret_key,
                               security_token=role.credentials.session_token,
                               **args)

    if connection_type == 'ses':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.ses.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_ses(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'iam':
        if 'region' in args:
            region = args['region']
            # drop region key-val pair from args or you'll get an exception
            del args['region']
            return boto.iam.connect_to_region(
                region,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_iam(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'route53':
        return boto.connect_route53(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    if connection_type == 'sns':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sns.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_sns(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'sqs':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.sqs.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_sqs(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'vpc':
        return boto.connect_vpc(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'rds':
        if 'region' in args:
            reg = args['region']
            rds_region = None
            for boto_region in boto.rds.regions():
                if reg.name == boto_region.name:
                    rds_region = boto_region

            if rds_region is None:
                raise Exception(
                    'The supplied region {0} is not in boto.rds.regions. {1}'.
                    format(reg, boto.rds.regions()))

        return boto.connect_rds(role.credentials.access_key,
                                role.credentials.secret_key,
                                security_token=role.credentials.session_token,
                                **args)

    if connection_type == 'redshift':
        if 'region' in args:
            region = args['region']
            del args['region']
            return boto.redshift.connect_to_region(
                region.name,
                aws_access_key_id=role.credentials.access_key,
                aws_secret_access_key=role.credentials.secret_key,
                security_token=role.credentials.session_token,
                **args)

        return boto.connect_redshift(
            role.credentials.access_key,
            role.credentials.secret_key,
            security_token=role.credentials.session_token,
            **args)

    err_msg = 'The connection_type supplied (%s) is not implemented.' % connection_type
    raise Exception(err_msg)
Example #40
0
                      help="Don't prompt for confirmation")
    parser.add_option(
        "--match",
        action="store",
        dest="match_re",
        default="^ansible-testing-",
        help="Regular expression used to find AWS resources (default: %default)"
    )

    (opts, args) = parser.parse_args()
    for required in ['ec2_access_key', 'ec2_secret_key']:
        if getattr(opts, required) is None:
            parser.error("Missing required parameter: --%s" % required)

    return (opts, args)


if __name__ == '__main__':

    (opts, args) = parse_args()

    # Connect to AWS with the credentials supplied on the command line.
    aws = boto.connect_ec2(aws_access_key_id=opts.ec2_access_key,
                           aws_secret_access_key=opts.ec2_secret_key)

    # Delete key pairs whose 'name' matches opts (DESTRUCTIVE).
    delete_aws_resources(aws.get_all_key_pairs, 'name', opts)

    # Delete security groups whose 'name' matches opts (DESTRUCTIVE).
    delete_aws_resources(aws.get_all_security_groups, 'name', opts)
Example #41
0
def test_ami_attribute():
    """Exercise get/modify of the AMI 'launchPermission' attribute.

    Walks through: baseline (no grants), adding/removing the 'all' group,
    idempotency of both operations, and the error paths for bad group names,
    bad image IDs, and the unimplemented user-ID grants.
    """
    conn = boto.connect_ec2('the_key', 'the_secret')
    # Build an AMI to operate on: boot an instance, image it, fetch the image.
    reservation = conn.run_instances('ami-1234abcd')
    instance = reservation.instances[0]
    image_id = conn.create_image(instance.id, "test-ami", "this is a test ami")
    image = conn.get_image(image_id)

    # Baseline: a fresh image has no launch-permission grants.
    attributes = conn.get_image_attribute(image.id, attribute='launchPermission')
    attributes.name.should.equal('launch_permission')
    attributes.attrs.should.have.length_of(0)

    ADD_GROUP_ARGS = {'image_id': image.id,
                      'attribute': 'launchPermission',
                      'operation': 'add',
                      'groups': 'all'}

    REMOVE_GROUP_ARGS = {'image_id': image.id,
                         'attribute': 'launchPermission',
                         'operation': 'remove',
                         'groups': 'all'}

    # Add 'all' group and confirm
    conn.modify_image_attribute(**ADD_GROUP_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute='launchPermission')
    attributes.attrs['groups'].should.have.length_of(1)
    attributes.attrs['groups'].should.equal(['all'])

    # Add is idempotent
    conn.modify_image_attribute.when.called_with(**ADD_GROUP_ARGS).should_not.throw(EC2ResponseError)

    # Remove 'all' group and confirm
    conn.modify_image_attribute(**REMOVE_GROUP_ARGS)

    attributes = conn.get_image_attribute(image.id, attribute='launchPermission')
    attributes.attrs.should.have.length_of(0)

    # Remove is idempotent
    conn.modify_image_attribute.when.called_with(**REMOVE_GROUP_ARGS).should_not.throw(EC2ResponseError)

    # Error: Add with group != 'all'
    with assert_raises(EC2ResponseError) as cm:
        conn.modify_image_attribute(image.id,
                                    attribute='launchPermission',
                                    operation='add',
                                    groups='everyone')
    cm.exception.code.should.equal('InvalidAMIAttributeItemValue')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    # Error: Add with invalid image ID
    with assert_raises(EC2ResponseError) as cm:
        conn.modify_image_attribute("ami-abcd1234",
                                    attribute='launchPermission',
                                    operation='add',
                                    groups='all')
    cm.exception.code.should.equal('InvalidAMIID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    # Error: Remove with invalid image ID
    with assert_raises(EC2ResponseError) as cm:
        conn.modify_image_attribute("ami-abcd1234",
                                    attribute='launchPermission',
                                    operation='remove',
                                    groups='all')
    cm.exception.code.should.equal('InvalidAMIID.NotFound')
    cm.exception.status.should.equal(400)
    cm.exception.request_id.should_not.be.none

    # Error: Add or remove with user ID instead of group
    conn.modify_image_attribute.when.called_with(image.id,
                                                 attribute='launchPermission',
                                                 operation='add',
                                                 user_ids=['user']).should.throw(NotImplementedError)
    conn.modify_image_attribute.when.called_with(image.id,
                                                 attribute='launchPermission',
                                                 operation='remove',
                                                 user_ids=['user']).should.throw(NotImplementedError)
Example #42
0
    ])
args = parser.parse_args()

# Process common botocross command line arguments (logging, credentials,
# region selection, include/exclude filters).
# NOTE: this is Python 2 code (print statements, ``except E, e`` syntax).
log = logging.getLogger('botocross')
bc.configure_logging(log, args.log_level)
credentials = bc.parse_credentials(args)
regions = bc.filter_regions(boto.ec2.regions(), args.region)
# NOTE(review): 'filter' shadows the built-in of the same name.
filter = bc.build_filter(args.filter, args.exclude)
log.info(args.resource_ids)

# Describe EBS volumes in every selected region, applying the include
# filter server-side and the exclude filter client-side.
log.info("Describing EBS volumes:")

for region in regions:
    try:
        ec2 = boto.connect_ec2(region=region, **credentials)
        volumes = ec2.get_all_volumes(volume_ids=args.resource_ids,
                                      filters=filter['filters'])
        if filter['excludes']:
            # Fetch the excluded set and subtract it by volume id.
            exclusions = ec2.get_all_volumes(filters=filter['excludes'])
            volumes = bc.filter_list_by_attribute(volumes, exclusions, 'id')
        print region.name + ": " + str(len(volumes)) + " volumes"
        for volume in volumes:
            if args.verbose:
                pprint(vars(volume))
            else:
                print volume.id
    except boto.exception.BotoServerError, e:
        # A failure in one region (e.g. auth) should not abort the others.
        log.error(e.error_message)
Example #43
0
    def __init__(self, name, password, instance_type, ami=None,
                 valid_ami_owners=None, valid_ami_location_regex=None,
                 elastic_ip=None, identifier=None, secret_identifier=None,
                 aws_id_file_path=None, user_data=None, region=None,
                 keypair_name=None,
                 security_name=None,
                 spot_instance=False, max_spot_price=1.6, volumes=None,
                 placement=None, price_multiplier=1.2, tags=None, retry=1,
                 retry_price_adjustment=1, product_description='Linux/UNIX',
                 **kwargs):
        """Configure an EC2-backed latent worker.

        Validates AMI selection (either a specific ``ami`` or owner/location
        criteria), resolves AWS credentials (explicit args or a deprecated
        ``aws_id`` file), opens the EC2 connection, and ensures the keypair
        and security group exist. NOTE: this is Python 2 code (``long``,
        ``basestring``).
        """
        if not boto:
            config.error("The python module 'boto' is needed to use a "
                         "EC2LatentWorker")

        # Relying on the implicit defaults for these names is deprecated;
        # warn, then fall back to the historical values.
        if keypair_name is None:
            reportDeprecatedWorkerNameUsage(
                "Use of default value of 'keypair_name' of EC2LatentWorker "
                "constructor is deprecated. Please explicitly specify value")
            keypair_name = 'latent_buildbot_slave'
        if security_name is None:
            reportDeprecatedWorkerNameUsage(
                "Use of default value of 'security_name' of EC2LatentWorker "
                "constructor is deprecated. Please explicitly specify value")
            security_name = 'latent_buildbot_slave'

        if volumes is None:
            volumes = []

        if tags is None:
            tags = {}

        AbstractLatentWorker.__init__(self, name, password, **kwargs)

        # Exactly one of (ami) or (valid_ami_owners / valid_ami_location_regex)
        # must be supplied — hence the XOR.
        if not ((ami is not None) ^
                (valid_ami_owners is not None or
                 valid_ami_location_regex is not None)):
            raise ValueError(
                'You must provide either a specific ami, or one or both of '
                'valid_ami_location_regex and valid_ami_owners')
        self.ami = ami
        if valid_ami_owners is not None:
            # Accept a single owner id or an iterable of ids.
            if isinstance(valid_ami_owners, (int, long)):
                valid_ami_owners = (valid_ami_owners,)
            else:
                for element in valid_ami_owners:
                    if not isinstance(element, (int, long)):
                        raise ValueError(
                            'valid_ami_owners should be int or iterable '
                            'of ints', element)
        if valid_ami_location_regex is not None:
            if not isinstance(valid_ami_location_regex, basestring):
                raise ValueError(
                    'valid_ami_location_regex should be a string')
            else:
                # verify that regex will compile
                re.compile(valid_ami_location_regex)
        self.valid_ami_owners = valid_ami_owners
        self.valid_ami_location_regex = valid_ami_location_regex
        self.instance_type = instance_type
        self.keypair_name = keypair_name
        self.security_name = security_name
        self.user_data = user_data
        self.spot_instance = spot_instance
        self.max_spot_price = max_spot_price
        self.volumes = volumes
        self.price_multiplier = price_multiplier
        self.retry_price_adjustment = retry_price_adjustment
        self.retry = retry
        self.attempt = 1
        self.product_description = product_description
        # Full placement (e.g. 'us-east-1a') only if both region and the
        # zone suffix were given.
        if None not in [placement, region]:
            self.placement = '%s%s' % (region, placement)
        else:
            self.placement = None
        if identifier is None:
            assert secret_identifier is None, (
                'supply both or neither of identifier, secret_identifier')
            # No explicit credentials: fall back to the deprecated
            # ~/.ec2/aws_id file (first line key id, second line secret).
            if aws_id_file_path is None:
                home = os.environ['HOME']
                default_path = os.path.join(home, '.ec2', 'aws_id')
                if os.path.exists(default_path):
                    aws_id_file_path = default_path
            if aws_id_file_path:
                log.msg('WARNING: EC2LatentWorker is using deprecated '
                        'aws_id file')
                with open(aws_id_file_path, 'r') as aws_file:
                    identifier = aws_file.readline().strip()
                    secret_identifier = aws_file.readline().strip()
        else:
            assert aws_id_file_path is None, \
                'if you supply the identifier and secret_identifier, ' \
                'do not specify the aws_id_file_path'
            assert secret_identifier is not None, \
                'supply both or neither of identifier, secret_identifier'

        region_found = None

        # Make the EC2 connection.
        if region is not None:
            # Resolve the region name against boto's region list before
            # connecting, so an unknown name fails loudly.
            for r in boto.ec2.regions(aws_access_key_id=identifier,
                                      aws_secret_access_key=secret_identifier):

                if r.name == region:
                    region_found = r

            if region_found is not None:
                self.conn = boto.ec2.connect_to_region(region,
                                                       aws_access_key_id=identifier,
                                                       aws_secret_access_key=secret_identifier)
            else:
                raise ValueError(
                    'The specified region does not exist: ' + region)

        else:
            self.conn = boto.connect_ec2(identifier, secret_identifier)

        # Make a keypair
        #
        # We currently discard the keypair data because we don't need it.
        # If we do need it in the future, we will always recreate the keypairs
        # because there is no way to
        # programmatically retrieve the private key component, unless we
        # generate it and store it on the filesystem, which is an unnecessary
        # usage requirement.
        try:
            key_pair = self.conn.get_all_key_pairs(keypair_name)[0]
            assert key_pair
            # key_pair.delete() # would be used to recreate
        except boto.exception.EC2ResponseError as e:
            if 'InvalidKeyPair.NotFound' not in e.body:
                if 'AuthFailure' in e.body:
                    log.msg('POSSIBLE CAUSES OF ERROR:\n'
                            '  Did you supply your AWS credentials?\n'
                            '  Did you sign up for EC2?\n'
                            '  Did you put a credit card number in your AWS '
                            'account?\n'
                            'Please doublecheck before reporting a problem.\n')
                raise
            # make one; we would always do this, and stash the result, if we
            # needed the key (for instance, to SSH to the box).  We'd then
            # use paramiko to use the key to connect.
            self.conn.create_key_pair(keypair_name)

        # create security group (only if it does not already exist)
        try:
            group = self.conn.get_all_security_groups(security_name)[0]
            assert group
        except boto.exception.EC2ResponseError as e:
            if 'InvalidGroup.NotFound' in e.body:
                self.security_group = self.conn.create_security_group(
                    security_name,
                    'Authorization to access the buildbot instance.')
                # Authorize the master as necessary
                # TODO this is where we'd open the hole to do the reverse pb
                # connect to the buildbot
                # ip = urllib.urlopen(
                #     'http://checkip.amazonaws.com').read().strip()
                # self.security_group.authorize('tcp', 22, 22, '%s/32' % ip)
                # self.security_group.authorize('tcp', 80, 80, '%s/32' % ip)
            else:
                raise

        # get the image
        if self.ami is not None:
            self.image = self.conn.get_image(self.ami)
        else:
            # verify we have access to at least one acceptable image
            discard = self.get_image()
            assert discard

        # get the specified elastic IP, if any
        if elastic_ip is not None:
            elastic_ip = self.conn.get_all_addresses([elastic_ip])[0]
        self.elastic_ip = elastic_ip
        self.tags = tags
Example #44
0
import boto

# List every spot instance request visible to the default credentials.
# NOTE: Python 2 code (print statements).
conn = boto.connect_ec2()
print conn
spotRequests = conn.get_all_spot_instance_requests()
for sr in spotRequests:
    print sr
Example #45
0
import boto
import time
from boto.ec2.regioninfo import RegionInfo

# Connect to the NeCTAR (OpenStack) EC2-compatible endpoint.
region = RegionInfo(name='melbourne', endpoint='nova.rc.nectar.org.au')

# SECURITY: credentials are hard-coded in source — they should be rotated
# and loaded from the environment or a config file instead.
access_key = "eb0444bb655d450f90178aced766fc3e"
secret_key = "a436033500bd461f844d81e429168753"

ec2_conn = boto.connect_ec2(aws_access_key_id=access_key,
                            aws_secret_access_key=secret_key,
                            is_secure=True,
                            region=region,
                            port=8773,
                            path='/services/Cloud',
                            validate_certs=False)

# Create a security group opening HTTP (80), SSH (22) and CouchDB (5984)
# to the world.
automation = ec2_conn.create_security_group('automation',
                                            'automatic security group')
automation.authorize('tcp', 80, 80, '0.0.0.0/0')
automation.authorize('tcp', 22, 22, '0.0.0.0/0')
automation.authorize('tcp', 5984, 5984, '0.0.0.0/0')

# initialize instance and volume
instNum = 2
for k in range(instNum):
    #launch instances
    ec2_conn.run_instances(image_id='ami-00003720',
                           key_name='feng',
Example #46
0
def get_conn(accesskey, secretkey):
    """Return an EC2 connection.

    Uses the explicit key pair when both values are truthy; otherwise
    falls back to boto's ambient credential discovery.
    """
    if not (accesskey and secretkey):
        return boto.connect_ec2()
    return boto.connect_ec2(accesskey, secretkey)
Example #47
0
def main():
  """Entry point for a Spark/Mesos EC2 cluster management script.

  Parses (opts, action, cluster_name) and dispatches on *action*:
  launch / destroy / login / get-master / copy-data / stop / start.
  Python 2 only (print statements, raw_input).  Exits non-zero when the
  mesos health check fails or the action is unknown.
  """
  (opts, action, cluster_name) = parse_args()
  conn = boto.connect_ec2()
  err = 0

  # Select an AZ at random if it was not specified.
  if opts.zone == "" or opts.zone == "none":
    opts.zone = random.choice(conn.get_all_zones()).name

  if action == "launch":
    # --resume reuses an already-launched cluster instead of creating one.
    if opts.resume:
      (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
          conn, opts, cluster_name)
    else:
      (master_nodes, slave_nodes, zoo_nodes) = launch_cluster(
          conn, opts, cluster_name)
      wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
    setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, True)
    print "Waiting for mesos cluster to start..."
    err = wait_for_mesos_cluster(master_nodes, opts)
    if err != 0:
      print >> stderr, "ERROR: mesos-check failed for spark_ec2"
      sys.exit(1)
    if opts.copy:
      copy_ampcamp_data(master_nodes, opts)
    print >>stderr, ("SUCCESS: Cluster successfully launched! You can login to the master at " + master_nodes[0].public_dns_name)

  elif action == "destroy":
    # Destructive: terminates every node after an explicit confirmation.
    response = raw_input("Are you sure you want to destroy the cluster " +
        cluster_name + "?\nALL DATA ON ALL NODES WILL BE LOST!!\n" +
        "Destroy cluster " + cluster_name + " (y/N): ")
    if response == "y":
      (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
          conn, opts, cluster_name)
      print "Terminating master..."
      for inst in master_nodes:
        inst.terminate()
      print "Terminating slaves..."
      for inst in slave_nodes:
        inst.terminate()
      if zoo_nodes != []:
        print "Terminating zoo..."
        for inst in zoo_nodes:
          inst.terminate()

  elif action == "login":
    # Open an interactive SSH session to the master node.
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
        conn, opts, cluster_name)
    master = master_nodes[0].public_dns_name
    print "Logging into master " + master + "..."
    proxy_opt = ""
    if opts.proxy_port != None:
      proxy_opt = "-D " + opts.proxy_port
    subprocess.check_call("ssh -o StrictHostKeyChecking=no -i %s %s %s@%s" %
        (opts.identity_file, proxy_opt, opts.user, master), shell=True)

  elif action == "get-master":
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(conn, opts, cluster_name)
    print master_nodes[0].public_dns_name

  elif action == "copy-data":
    # Push the AMP Camp dataset to an already-running cluster.
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(conn, opts, cluster_name)
    print "Waiting for mesos cluster to start..."
    err = wait_for_mesos_cluster(master_nodes, opts)
    if err != 0:
      print >> stderr, "ERROR: mesos-check failed for spark_ec2"
      sys.exit(1)
    copy_ampcamp_data(master_nodes, opts)
    print >>stderr, ("SUCCESS: Data copied successfully! You can login to the master at " + master_nodes[0].public_dns_name)

  elif action == "stop":
    # Stop (not terminate) instances; EBS-backed clusters keep their data.
    response = raw_input("Are you sure you want to stop the cluster " +
        cluster_name + "?\nDATA ON EPHEMERAL DISKS WILL BE LOST, " +
        "BUT THE CLUSTER WILL KEEP USING SPACE ON\n" + 
        "AMAZON EBS IF IT IS EBS-BACKED!!\n" +
        "Stop cluster " + cluster_name + " (y/N): ")
    if response == "y":
      (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
          conn, opts, cluster_name)
      print "Stopping master..."
      for inst in master_nodes:
        if inst.state not in ["shutting-down", "terminated"]:
          inst.stop()
      print "Stopping slaves..."
      for inst in slave_nodes:
        if inst.state not in ["shutting-down", "terminated"]:
          inst.stop()
      if zoo_nodes != []:
        print "Stopping zoo..."
        for inst in zoo_nodes:
          if inst.state not in ["shutting-down", "terminated"]:
            inst.stop()

  elif action == "start":
    # Restart a stopped cluster and re-run setup (slaves first, then master).
    (master_nodes, slave_nodes, zoo_nodes) = get_existing_cluster(
        conn, opts, cluster_name)
    print "Starting slaves..."
    for inst in slave_nodes:
      if inst.state not in ["shutting-down", "terminated"]:
        inst.start()
    print "Starting master..."
    for inst in master_nodes:
      if inst.state not in ["shutting-down", "terminated"]:
        inst.start()
    if zoo_nodes != []:
      print "Starting zoo..."
      for inst in zoo_nodes:
        if inst.state not in ["shutting-down", "terminated"]:
          inst.start()
    wait_for_cluster(conn, opts.wait, master_nodes, slave_nodes, zoo_nodes)
    setup_cluster(conn, master_nodes, slave_nodes, zoo_nodes, opts, False)
    print "Waiting for mesos cluster to start..."
    err = wait_for_mesos_cluster(master_nodes, opts)
    if err != 0:
      print >> stderr, "ERROR: mesos-check failed for spark_ec2"
      sys.exit(1)
    if opts.copy:
      copy_ampcamp_data(master_nodes, opts)
    print >>stderr, ("SUCCESS: Cluster successfully launched! You can login to the master at " + master_nodes[0].public_dns_name)


  else:
    print >> stderr, "Invalid action: %s" % action
    sys.exit(1)
Example #48
0
def main(argv):
    """Provision a small Hadoop/Ambari cluster on the NeCTAR cloud.

    Parses -a/-s (EC2 access/secret keys) from *argv*, launches one
    management instance plus three worker instances, attaches a fresh
    volume to each, and writes the collected private IPs to ./hosts and
    ./hosts.txt.  Always exits via sys.exit(1) after writing hosts.txt.

    NOTE(review): the print(..., file=...) calls require
    `from __future__ import print_function` on Python 2 -- confirm the
    module header (not visible here) provides it.
    """
    def print_help(file=sys.stdout):
        print('server_deployment.py -a <EC2 Access Key> -s <EC2 Secret Key>',
              file=file)

    ec2_access_key = ""
    ec2_secret_key = ""
    try:
        opts, args = getopt.getopt(argv[1:], "ha:s:",
                                   ["ec2AccessKey=", "ec2SecretKey="])
    except getopt.GetoptError:
        print_help(file=sys.stderr)
        sys.exit(2)

    #print("opts:", opts, "args:", args)
    for opt, arg in opts:
        #print("option:", opt, "arg:", arg)
        if opt == '-h':
            print_help()
            sys.exit()
        elif opt in ("-a", "--ec2AccessKey"):
            ec2_access_key = arg
        elif opt in ("-s", "--ec2SecretKey"):
            ec2_secret_key = arg

    # Template for /etc/hosts; instance lines are appended below.
    hosts_file_content = """127.0.0.1       localhost

# The following lines are desirable for IPv6 capable hosts
::1     localhost       ip6-localhost   ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
"""

    hosts = ""

    region = RegionInfo(name='melbourne', endpoint='nova.rc.nectar.org.au')
    ec2_conn = boto.connect_ec2(aws_access_key_id=ec2_access_key,
                                aws_secret_access_key=ec2_secret_key,
                                is_secure=True,
                                region=region,
                                port=8773,
                                path='/services/Cloud',
                                validate_certs=False)

    #images = ec2_conn.get_all_images()
    #for img in images:
    #	print('Image id: {id}, image name: {name}'.format(id=img.id, name=img.name))

    # reservations = ec2_conn.get_all_reservations()
    # print('Index\tID\t\tInstance')
    # for idx, res in enumerate(reservations):
    # 	print('{idx}\t{res_id}\t{res_inst}'.format(idx=idx, res_id=res.id, res_inst=res.instances))

    ### Run for the last instance with 40 volumne
    # Management node: extra 'ambari' security group and a 40 GB volume.
    reservation = ec2_conn.run_instances('ami-00003a61',
                                         key_name='team40',
                                         instance_type='m1.medium',
                                         security_groups=[
                                             'default', 'ssh',
                                             'subnet_free_access', 'ambari',
                                             'hadoop'
                                         ],
                                         placement='melbourne-qh2')

    reservations = wait_for_instance(ec2_conn, reservation)

    hosts += "{ip}\t{host}-0.localdomain\t{host}-0\t#management\n".format(
        host=reservations[0].id,
        ip=reservations[0].instances[0].private_ip_address)

    print(
        '\nID: {r_id}\tStatus: {r_status}\tIP: {r_ip}\tPlacement: {r_placement}'
        .format(r_id=reservations[0].instances[0].id,
                r_status=reservations[0].instances[0].state,
                r_ip=reservations[0].instances[0].private_ip_address,
                r_placement=reservations[0].instances[0].placement))

    vol_req = ec2_conn.create_volume(40, 'melbourne-qh2')

    vol_req = wait_for_volume(ec2_conn, vol_req)
    print('Volume status: {}, volume AZ: {}'.format(vol_req[0].status,
                                                    vol_req[0].zone))
    ec2_conn.attach_volume(vol_req[0].id, reservations[0].instances[0].id,
                           '/dev/vdc')

    #Create instance with defualt value.
    # Worker nodes: three instances, each with a 70 GB volume on /dev/vdc.
    for i in range(3):
        #'ami-00003a61'
        reservation = ec2_conn.run_instances(
            'ami-00003a61',
            key_name='team40',
            instance_type='m1.medium',
            security_groups=['default', 'ssh', 'subnet_free_access', 'hadoop'],
            placement='melbourne-qh2')

        reservations = wait_for_instance(ec2_conn, reservation)

        hosts += "{ip}\t{host}-0.localdomain\t{host}-0\t#node{number}\n".format(
            host=reservations[0].id,
            ip=reservations[0].instances[0].private_ip_address,
            number=i)

        print(
            '\nID: {r_id}\tStatus: {r_status}\tIP: {r_ip}\tPlacement: {r_placement}'
            .format(r_id=reservations[0].instances[0].id,
                    r_status=reservations[0].instances[0].state,
                    r_ip=reservations[0].instances[0].private_ip_address,
                    r_placement=reservations[0].instances[0].placement))

        vol_req = ec2_conn.create_volume(70, 'melbourne-qh2')

        vol_req = wait_for_volume(ec2_conn, vol_req)
        print('Volume status: {}, volume AZ: {}'.format(
            vol_req[0].status, vol_req[0].zone))
        ec2_conn.attach_volume(vol_req[0].id, reservations[0].instances[0].id,
                               '/dev/vdc')

    hosts_file_content += hosts

    with open("./hosts", "w") as hosts_file:
        print(hosts_file_content, file=hosts_file)

    with open("./hosts.txt", "w") as host_list_file:
        print(hosts, file=host_list_file)
        sys.exit(1)

    # NOTE(review): everything below is unreachable -- sys.exit(1) above
    # always fires first.  It also uses Python 2 print statements while
    # the code above uses print(...), so this tail appears to be a
    # different script fused in during extraction; it should be removed
    # or moved to its own function.
    ami = sys.argv[1]
    keypair = sys.argv[2]
    num_instances = sys.argv[3]

    f = open('%s/.awssecret' % expanduser('~'), 'r')
    AWS_KEY = f.readline().strip()
    AWS_SECRET = f.readline().strip()
    f.close()

    print AWS_KEY
    print AWS_SECRET

    print "connecting to ec2"
    ec2_conn = boto.connect_ec2(AWS_KEY, AWS_SECRET)

    print "getting image", ami
    images = ec2_conn.get_all_images(image_ids=[ami])

    print "requesting", num_instances, "instance(s)"
    rsrv = images[0].run(1, num_instances, keypair)

    f = open('instances', 'w')
    pending = list(rsrv.instances)
    running = []
    configured = []
    while len(configured) < len(rsrv.instances):

        print "pending:", pending
        print "running:", running
Example #50
0
def terminate_instance(instance_id):
    """Terminate the EC2 instance with the given id.

    Bug fix: the original called stop_instances(), which only *stops* an
    EBS-backed instance (it keeps existing and accruing EBS charges);
    terminate_instances() matches this function's name and contract.

    :param instance_id: the EC2 instance id string (e.g. 'i-0123abcd').
    """
    ec2_conn = boto.connect_ec2()
    ec2_conn.terminate_instances([instance_id])
Example #51
0
def main(argv):
    """Run a sweep of Sparrow isolation experiments on an EC2 cluster.

    For each (utilization, users) pair and sample ratio, deploys the
    Sparrow scheduler and prototype frontends/backends via ec2_exp,
    lets the trial run, then collects and parses the logs into a
    per-trial directory under /disk1/sparrow/.  Passing "True" as the
    first CLI argument launches fresh spot instances first.
    Python 2 only (print statements).
    """
    launch_instances = False
    if len(argv) >= 1 and argv[0] == "True":
        launch_instances = True

    # Each pair maps a target cluster utilization to the user/priority
    # spec string passed through to the experiment harness.
    utilization_user_pairs = [(0.25, "high:1:0"), (0.5, "high:1:0,low:1:0"),
                              (0.75, "high:1:0,low:2:0"),
                              (1.0, "high:1:0,low:3:0"),
                              (1.25, "high:1:0,low:4:0"),
                              (1.5, "high:1:0,low:5:0"),
                              (1.75, "high:1:0,low:6:0"),
                              (2.0, "high:1:0,low:7:0")]
    sample_ratios = [2.0]
    sample_ratio_constrained = 1

    # Amount of time it takes each task to run in isolation
    task_duration_ms = 100
    tasks_per_job = 1
    private_ssh_key = "patkey.pem"
    sparrow_branch = "debugging"
    num_backends = 5
    num_frontends = 1
    cores_per_backend = 4
    # Run each trial for 5 minutes.
    trial_length = 500
    num_preferred_nodes = 0
    nm_task_scheduler = "priority"
    cluster_name = "isolation"

    # Jobs/sec at which the cluster is fully utilized, given the task
    # duration and cluster size above.
    full_utilization_rate_s = (
        float(num_backends * cores_per_backend * 1000) /
        (task_duration_ms * tasks_per_job * num_frontends))

    # Warmup information
    warmup_s = 120
    post_warmup_s = 30
    warmup_arrival_rate_s = 0.4 * full_utilization_rate_s

    if launch_instances:
        print "********Launching instances..."
        run_cmd(
            ("./ec2-exp.sh launch %s --ami ami-a658c0cf " +
             "--instance-type cr1.8xlarge --spot-price %s -f %s -b %s -i %s") %
            (cluster_name, 0.5, num_frontends, num_backends, private_ssh_key))
        time.sleep(10)

    for sample_ratio in sample_ratios:
        for utilization, users in utilization_user_pairs:
            arrival_rate_s = utilization * full_utilization_rate_s

            # This is a little bit of a hacky way to pass args to the ec2 script.
            (opts, args) = ec2_exp.parse_args(False)
            opts.identity_file = private_ssh_key
            opts.arrival_rate = arrival_rate_s
            opts.branch = sparrow_branch
            opts.sample_ratio = sample_ratio
            opts.sample_ratio_constrained = sample_ratio_constrained
            opts.tasks_per_job = tasks_per_job
            opts.num_preferred_nodes = num_preferred_nodes
            opts.cpus = cores_per_backend

            conn = boto.connect_ec2()
            frontends, backends = ec2_exp.find_existing_cluster(
                conn, opts, cluster_name)

            print(
                "********Launching experiment at utilization %s with sample ratio %s..."
                % (utilization, sample_ratio))

            print(
                "********Deploying with arrival rate %s and warmup arrival rate %s"
                % (arrival_rate_s, warmup_arrival_rate_s))
            ec2_exp.deploy_cluster(frontends, backends, opts,
                                   warmup_arrival_rate_s, warmup_s,
                                   post_warmup_s, nm_task_scheduler, users)
            ec2_exp.start_sparrow(frontends, backends, opts)

            print "*******Sleeping after starting Sparrow"
            time.sleep(10)
            print "********Starting prototype frontends and backends"
            ec2_exp.start_proto(frontends, backends, opts)
            time.sleep(trial_length)

            # Pick a fresh log directory; append "_a" until unused.
            log_dirname = "/disk1/sparrow/isolation_%s_%s" % (utilization,
                                                              sample_ratio)
            while os.path.exists(log_dirname):
                log_dirname = "%s_a" % log_dirname
            os.mkdir(log_dirname)

            ec2_exp.execute_command(frontends, backends, opts,
                                    "./find_bugs.sh")

            print "********Stopping prototypes and Sparrow"
            ec2_exp.stop_proto(frontends, backends, opts)
            ec2_exp.stop_sparrow(frontends, backends, opts)

            print "********Collecting logs and placing in %s" % log_dirname
            opts.log_dir = log_dirname
            ec2_exp.collect_logs(frontends, backends, opts)
            run_cmd("gunzip %s/*.gz" % log_dirname)

            print "********Parsing logs"
            run_cmd((
                "cd /tmp/sparrow/src/main/python/ && ./parse_logs.sh log_dir=%s "
                "output_dir=%s/results start_sec=350 end_sec=450 && cd -") %
                    (log_dirname, log_dirname))
Example #52
0
#!/usr/bin/python

import boto

# EBS snapshot backup script: snapshot every volume, then prune the
# snapshots this script created earlier.
# NOTE(review): AWS credentials are hard-coded below -- they should be
# revoked and replaced with environment/IAM-based configuration.

ec2 = boto.connect_ec2('AKIAI2R6ABPA43HF7H2Q',
                       'tNVN0Da05SX2JUD3eX0ePDGkQ2uW4xbJpkGaYP6Y')

# Take a snapshot of every volume in the account.
volumes = ec2.get_all_volumes()
for volume in volumes:
    ec2.create_snapshot(volume.id, "backup made with backup script")

# Delete snapshots previously made by this script.
# Bug fix: the original looped over *volumes* while referencing an
# undefined name `snapshot` (a NameError); iterate the account's own
# snapshots instead and match on the description we wrote above.
snapshots = ec2.get_all_snapshots(owner='self')
for snapshot in snapshots:
    if snapshot.description == "backup made with backup script":
        snapshot.delete()
Example #53
0
def _get_all_running_instances():
    """Return every EC2 instance currently in the 'running' state."""
    conn = boto.connect_ec2()
    reservations = conn.get_all_instances(
        filters={"instance-state-name": "running"})
    return _get_instances_from_reservations(reservations)
Example #54
0
# Author :  PENG WANG
# Student Number : 680868
# Supervisor : Prof. Richard Sinnott
# Subject: COMP90055 COMPUTING PROJECT
# Project Name : Gender Identification and Sentiment Analysis on Twitter through machine learning approaches

import boto
import time
from boto.ec2.regioninfo import RegionInfo

#Set up the region and Establish the connection
# NOTE(review): credentials are hard-coded; move them to environment
# variables or a config file before sharing this script.
region=RegionInfo(name='melbourne', endpoint='nova.rc.nectar.org.au')
# Fix: the endpoint path previously contained a garbled fragment
# ('/services"})/Cloud'); every other connection in this file uses
# '/services/Cloud'.
ec2_comm = boto.connect_ec2(aws_access_key_id='1bf4fd7557a84d559ae85a9455837b78', aws_secret_access_key='9c8d5a0fae2c4c87bd86e42d3ae9fe33', is_secure=True, 
	region=region, port=8773, path='/services/Cloud', validate_certs=False)

#This is used to store the ip addresses of Virtual Machines
VM_ips = []
print("Connection is established!")

#Check Security group and add new groups as well as rules if needed

def check_group_status(groupname):
	"""Return True if a security group named *groupname* already exists."""
	existing_groups = ec2_comm.get_all_security_groups()
	return any(group.name == groupname for group in existing_groups)

def create_security_group(groupname):
	check = check_group_status(groupname)
Example #55
0
 def test_the_class(self):
     """A freshly mocked EC2 connection should report zero instances."""
     connection = boto.connect_ec2()
     instances = list(connection.get_all_instances())
     instances.should.have.length_of(0)
Example #56
0
def test_sec_group_rule_limit_vpc():
    """Exercise the per-security-group rule limit for a VPC group.

    Fills a group up to the rule limit for both ingress and egress and
    verifies that exceeding it raises RulesPerSecurityGroupLimitExceeded.
    Rules targeting another security group count toward the limit too.
    Uses the `sure` fluent assertion style (`.should.`); presumably runs
    under a moto @mock_ec2/@mock_vpc decorator applied elsewhere --
    TODO confirm.
    """
    ec2_conn = boto.connect_ec2()
    vpc_conn = boto.connect_vpc()

    vpc = vpc_conn.create_vpc("10.0.0.0/16")

    sg = ec2_conn.create_security_group("test", "test", vpc_id=vpc.id)
    other_sg = ec2_conn.create_security_group("test_2", "test", vpc_id=vpc.id)

    # INGRESS
    # A single authorize call with far more CIDRs than the limit must be
    # rejected atomically (no rules added).
    with pytest.raises(EC2ResponseError) as cm:
        ec2_conn.authorize_security_group(
            group_id=sg.id,
            ip_protocol="-1",
            cidr_ip=["{0}.0.0.0/0".format(i) for i in range(110)],
        )
    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")

    sg.rules.should.be.empty
    # authorize a rule targeting a different sec group (because this count too)
    success = ec2_conn.authorize_security_group(
        group_id=sg.id,
        ip_protocol="-1",
        src_security_group_group_id=other_sg.id)
    success.should.be.true
    # fill the rules up the limit
    success = ec2_conn.authorize_security_group(
        group_id=sg.id,
        ip_protocol="-1",
        cidr_ip=["{0}.0.0.0/0".format(i) for i in range(49)],
    )
    # verify that we cannot authorize past the limit for a CIDR IP
    success.should.be.true
    with pytest.raises(EC2ResponseError) as cm:
        ec2_conn.authorize_security_group(group_id=sg.id,
                                          ip_protocol="-1",
                                          cidr_ip=["100.0.0.0/0"])
    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
    # verify that we cannot authorize past the limit for a different sec group
    with pytest.raises(EC2ResponseError) as cm:
        ec2_conn.authorize_security_group(
            group_id=sg.id,
            ip_protocol="-1",
            src_security_group_group_id=other_sg.id)
    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")

    # EGRESS
    # authorize a rule targeting a different sec group (because this count too)
    ec2_conn.authorize_security_group_egress(group_id=sg.id,
                                             ip_protocol="-1",
                                             src_group_id=other_sg.id)
    # fill the rules up the limit
    # remember that by default, when created a sec group contains 1 egress rule
    # so our other_sg rule + 48 CIDR IP rules + 1 by default == 50 the limit
    for i in range(1, 49):
        ec2_conn.authorize_security_group_egress(
            group_id=sg.id, ip_protocol="-1", cidr_ip="{0}.0.0.0/0".format(i))
    # verify that we cannot authorize past the limit for a CIDR IP
    with pytest.raises(EC2ResponseError) as cm:
        ec2_conn.authorize_security_group_egress(group_id=sg.id,
                                                 ip_protocol="-1",
                                                 cidr_ip="50.0.0.0/0")
    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
    # verify that we cannot authorize past the limit for a different sec group
    with pytest.raises(EC2ResponseError) as cm:
        ec2_conn.authorize_security_group_egress(group_id=sg.id,
                                                 ip_protocol="-1",
                                                 src_group_id=other_sg.id)
    cm.value.error_code.should.equal("RulesPerSecurityGroupLimitExceeded")
def rebundle(reboot_if_needed=False, euca=False):
    """
    Rebundles the EC2 instance that is passed as the -H parameter
    This script handles all aspects of the rebundling process and is (almost) fully automated.
    Two things should be edited and provided before invoking it: AWS account information 
    and the desired size of the root volume for the new instance.  
     
    :rtype: bool
    :return: If instance was successfully rebundled and an AMI ID was received,
             return True.
             False, otherwise.
    """
    _check_fabric_version()
    time_start = dt.datetime.utcnow()
    print "Rebundling instance '%s'. Start time: %s" % (env.hosts[0], time_start)
    _amazon_ec2_environment()
    # NOTE(review): `flag` is assigned but never used in the visible body.
    flag=1
    if boto:
        # Select appropriate region:
        # Instance metadata service gives us the AZ we are running in.
        availability_zone = run("curl --silent http://169.254.169.254/latest/meta-data/placement/availability-zone")
        instance_region = availability_zone#[:-1] # Truncate zone letter to get region name
        print(red(instance_region))
        # TODO modify _get_ec2_conn to take the url parameters
        #ec2_conn = _get_ec2_conn(instance_region)
        #region = RegionInfo(name="fog", endpoint="172.17.31.11:8773")
        #region = RegionInfo(name="Eucalyptus", endpoint="172.17.31.11:8773")
        # Hard-wired private Eucalyptus endpoint.
        region = RegionInfo(None, "eucalyptus", "172.17.31.11")

        # Credentials come from EC2_* env vars, falling back to AWS_*.
        aws_access_key_id = ''  
        aws_secret_access_key = ''
        if  os.environ['EC2_ACCESS_KEY']:
            aws_access_key_id = os.environ['EC2_ACCESS_KEY'] 
            aws_secret_access_key=os.environ['EC2_SECRET_KEY']
        else:
            aws_access_key_id = os.environ['AWS_ACCESS_KEY'] 
            aws_secret_access_key=os.environ['AWS_SECRET_KEY']

        ec2_conn = boto.connect_ec2(aws_access_key_id , aws_secret_access_key,
                                    port=8773,
                                    region=region, path="/services/Eucalyptus",
                                    is_secure=False)
        

        #ec2_conn = boto.connect_ec2(host="172.17.31.11:8773", region=region, path="/services/Eucalyptus")
        vol_size = 5 # This will be the size (in GB) of the root partition of the new image
        
        # hostname = env.hosts[0] # -H flag to fab command sets this variable so get only 1st hostname
        instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
        print(red(instance_id))
        
        # Handle reboot if required
        if not _reboot(instance_id, reboot_if_needed):
            return False # Indicates that rebundling was not completed and should be restarted
        
        #_clean() # Clean up the environment before rebundling
        image_id = None
        kernel_id = run("curl --silent http://169.254.169.254/latest/meta-data/kernel-id")
        print(red(kernel_id))
        if instance_id and availability_zone and kernel_id:
            #print "Rebundling instance with ID '%s' in region '%s'" % (instance_id, ec2_conn.region.name)
            try:
                print "Rebundling instance with ID '%s' in region '%s'" % (instance_id, ec2_conn.region.name)
                # instance region and availability zone is the same for eucalyptus
                # Need 2 volumes - one for image (rsync) and the other for the snapshot (see instance-to-ebs-ami.sh)
                vol = ec2_conn.create_volume(vol_size, availability_zone)
                #vol = ec2_conn.create_volume(vol_size, instance_region)
                vol2 = ec2_conn.create_volume(vol_size, availability_zone)
                #vol2 = ec2_conn.create_volume(vol_size,instance_region)
                # TODO: wait until it becomes 'available'
                print "Created 2 new volumes of size '%s' with IDs '%s' and '%s'" % (vol_size, vol.id, vol2.id)
            except EC2ResponseError, e:
                print(red("Error creating volume: %s" % e))
                return False
            
            
            if vol:
                try:
                    # Attach newly created volumes to the instance
                    #dev_id = '/dev/sdh'
                    dev_id = '/dev/vda'
                    if not _attach(ec2_conn, instance_id, vol.id, dev_id, euca):
                        print(red("Error attaching volume '%s' to the instance. Aborting." % vol.id))
                        vol = ec2_conn.delete_volume(vol.id)
                        return False

                    #dev_id = '/dev/sdj'
                    dev_id = '/dev/vdb'
                    if not _attach(ec2_conn, instance_id, vol2.id, dev_id, euca):
                        print(red("Error attaching volume '%s' to the instance. Aborting." % vol2.id))
                        vol = ec2_conn.delete_volume(vol2.id)
                        return False

                    if euca:
                        # Eucalyptus path: bundle the volume with the
                        # euca2ools CLI, upload, and register it.
                        sudo('mkfs.ext3 /dev/vda')
                        sudo('mkdir -m 000 -p /mnt/ebs')
                        sudo('mount /dev/vda /mnt/ebs')
                        put('euca2-*-x509.zip','/tmp')
                        run('unzip /tmp/euca2-*-x509.zip -d /tmp')

                        #is it probably in something is it doing in previous methods ?
                        #problem here (22 login refused to instance if bundled via Fabric, no problem if bundled manually)

                        sudo('euca-bundle-vol --ec2cert /tmp/cloud-cert.pem -c /tmp/euca2-admin-9bc9c71a-cert.pem -k /tmp/euca2-admin-9bc9c71a-pk.pem -u 59242150790379988457748463773923344394 -s 5000 -d /mnt/ebs -p  cloudman -e /root,/etc/udev,/var/lib/ec2,/mnt,/proc,/tmp,/var/lib/rabbitmq/mnesia')
                        run('euca-upload-bundle --config /tmp/eucarc -m /mnt/ebs/cloudman.manifest.xml -b cloudman')
                        run('euca-register --config /tmp/eucarc cloudman/cloudman.manifest.xml')
#                        run('uec-publish-image -l all -t image -k eki-650A174A -r none x86_64 /mnt/ebs/cloudman.img cloudman-uec')
                        _detach(ec2_conn, instance_id, vol.id)
                        _detach(ec2_conn, instance_id, vol2.id)
                        ec2_conn.delete_volume(vol.id)
                        ec2_conn.delete_volume(vol2.id)

                    else:
                    # Move the file system onto the new volume (with a help of a script)
                         url = os.path.join(REPO_ROOT_URL, "instance-to-ebs-ami.sh")
                         # with contextlib.nested(cd('/tmp'), settings(hide('stdout', 'stderr'))):
                         with cd('/tmp'):
                             if exists('/tmp/'+os.path.split(url)[1]):
                                 sudo('rm /tmp/'+os.path.split(url)[1])
                             sudo('wget %s' % url)
                             sudo('chmod u+x /tmp/%s' % os.path.split(url)[1])
                             sudo('./%s' % os.path.split(url)[1])
                    # Detach the new volume
                         _detach(ec2_conn, instance_id, vol.id)
                         _detach(ec2_conn, instance_id, vol2.id)
                         answer = confirm("Would you like to terminate the instance used during rebundling?", default=False)
                         if answer:
                             ec2_conn.terminate_instances([instance_id])
                         # Create a snapshot of the new volume
                         commit_num = local('cd %s; hg tip | grep changeset | cut -d: -f2' % os.getcwd()).strip()
                         snap_id = _create_snapshot(ec2_conn, vol.id, "AMI: galaxy-cloudman (using mi-deployment at commit %s)" % commit_num)
                         # Register the snapshot of the new volume as a machine image (i.e., AMI)
                         arch = 'x86_64'
                         root_device_name = '/dev/sda1'
                         # Extra info on how EBS image registration is done: http://markmail.org/message/ofgkyecjktdhofgz
                         # http://www.elastician.com/2009/12/creating-ebs-backed-ami-from-s3-backed.html
                         # http://www.shlomoswidler.com/2010/01/creating-consistent-snapshots-of-live.html
                         ebs = BlockDeviceType()
                         ebs.snapshot_id = snap_id
                         ebs.delete_on_termination = True
                         ephemeral0_device_name = '/dev/sdb'
                         ephemeral0 = BlockDeviceType()
                         ephemeral0.ephemeral_name = 'ephemeral0'
                         ephemeral1_device_name = '/dev/sdc'
                         ephemeral1 = BlockDeviceType()
                         ephemeral1.ephemeral_name = 'ephemeral1'
                         # ephemeral2_device_name = '/dev/sdd' # Needed for instances w/ 3 ephemeral disks
                         # ephemeral2 = BlockDeviceType()
                         # ephemeral2.ephemeral_name = 'ephemeral2'
                         # ephemeral3_device_name = '/dev/sde' # Needed for instances w/ 4 ephemeral disks
                         # ephemeral3 = BlockDeviceType()
                         # ephemeral3.ephemeral_name = 'ephemeral3'
                         block_map = BlockDeviceMapping()
                         block_map[root_device_name] = ebs
                         block_map[ephemeral0_device_name] = ephemeral0
                         block_map[ephemeral1_device_name] = ephemeral1
                         name = 'galaxy-cloudman-%s' % time_start.strftime("%Y-%m-%d")
                         image_id = ec2_conn.register_image(name, description=AMI_DESCRIPTION, architecture=arch, kernel_id=kernel_id, root_device_name=root_device_name, block_device_map=block_map)
                         answer = confirm("Volume with ID '%s' was created and used to make this AMI but is not longer needed. Would you like to delete it?" % vol.id)
                         if answer:
                             ec2_conn.delete_volume(vol.id)
                         print "Deleting the volume (%s) used for rsync only" % vol2.id
                         ec2_conn.delete_volume(vol2.id)
                         print(green("--------------------------"))
                         print(green("Finished creating new machine image. Image ID: '%s'" % (image_id)))
                         print(green("--------------------------"))
                         answer = confirm("Would you like to make this machine image public?", default=False)
                         if image_id and answer:
                             ec2_conn.modify_image_attribute(image_id, attribute='launchPermission', operation='add', groups=['all'])
                    

                except EC2ResponseError, e:
                    print(red("Error creating image: %s" % e))
                    return False
            else:
                print(red("Error creating new volume"))
                return False
Example #58
0
def launch():
    config = readconfig()
    MY_AMI = config.get('ec2', 'AMI')
    SECURITY_GROUP = config.get('ec2', 'SECURITY_GROUP')
    KEY_PATH = config.get('ec2', 'KEY_PATH')
    INSTANCE_TYPE = config.get('ec2', 'INSTANCE_TYPE')

    launch = True

    if config.has_option('ec2', 'HOST'):
        host = config.get('ec2', 'HOST')
        if host != "" and host is not None:
            print "there is already an instance launched"
            launch = False
            return

    if launch:
        conn = boto.connect_ec2()
        image = conn.get_image(MY_AMI)
        security_groups = conn.get_all_security_groups()

        try:
            [geonode_group
             ] = [x for x in security_groups if x.name == SECURITY_GROUP]
        except ValueError:
            # this probably means the security group is not defined
            # create the rules programatically to add access to ports 21, 22, 80, 2300-2400, 8000, 8001, 8021 and 8080
            geonode_group = conn.create_security_group(SECURITY_GROUP,
                                                       'Cool GeoNode rules')
            geonode_group.authorize('tcp', 21, 21,
                                    '0.0.0.0/0')  # Batch Upload FTP
            geonode_group.authorize('tcp', 22, 22, '0.0.0.0/0')  # SSH
            geonode_group.authorize('tcp', 80, 80, '0.0.0.0/0')  # Apache
            geonode_group.authorize('tcp', 2300, 2400,
                                    '0.0.0.0/0')  # Passive FTP
            geonode_group.authorize('tcp', 8000, 8001,
                                    '0.0.0.0/0')  # Dev Django and Jetty
            geonode_group.authorize('tcp', 8021, 8021,
                                    '0.0.0.0/0')  # Batch Upload FTP
            geonode_group.authorize('tcp', 8080, 8080, '0.0.0.0/0')  # Tomcat

        try:
            [geonode_key
             ] = [x for x in conn.get_all_key_pairs() if x.name == 'geonode']
        except ValueError:
            # this probably means the key is not defined
            # get the first one in the belt for now:
            print "GeoNode file not found in the server"
            geonode_key = conn.get_all_key_pairs()[0]

        reservation = image.run(security_groups=[
            geonode_group,
        ],
                                key_name=geonode_key.name,
                                instance_type=INSTANCE_TYPE)
        instance = reservation.instances[0]

        print "Firing up instance"

        # Give it 10 minutes to appear online
        for i in range(120):
            time.sleep(5)
            instance.update()
            print instance.state
            if instance.state == "running":
                break

        if instance.state == "running":
            dns = instance.dns_name
            print "Instance up and running at %s" % dns

        config.set('ec2', 'HOST', dns)
        config.set('ec2', 'INSTANCE', instance.id)
        writeconfig(config)

        print "ssh -i %s ubuntu@%s" % (KEY_PATH, dns)
        print "Terminate the instance via the web interface %s" % instance

        time.sleep(20)
Example #59
0
def test_basic_connect():
    """Smoke test: creating a default EC2 connection must not raise."""
    connection = boto.connect_ec2()
Example #60
0
    def _connect(self):
        """Connects to the ec2 cloud provider

        :return: :py:class:`boto.ec2.connection.EC2Connection`
        :raises: Generic exception on error
        """
        # check for existing connection (memoized on the instance)
        if self._ec2_connection:
            return self._ec2_connection

        if not self._vpc:
            vpc_connection = None

        try:
            log.debug("Connecting to ec2 host %s", self._ec2host)
            region = ec2.regioninfo.RegionInfo(name=self._region_name,
                                               endpoint=self._ec2host)

            # connect to webservice
            ec2_connection = boto.connect_ec2(
                aws_access_key_id=self._access_key,
                aws_secret_access_key=self._secret_key,
                is_secure=self._secure,
                host=self._ec2host,
                port=self._ec2port,
                path=self._ec2path,
                region=region)
            log.debug("EC2 connection has been successful.")

            if self._vpc:
                vpc_connection = boto.connect_vpc(
                    aws_access_key_id=self._access_key,
                    aws_secret_access_key=self._secret_key,
                    is_secure=self._secure,
                    host=self._ec2host,
                    port=self._ec2port,
                    path=self._ec2path,
                    region=region)
                log.debug("VPC connection has been successful.")

                # Resolve the configured VPC (given by Name tag or by id)
                # to a concrete VPC id.
                for vpc in vpc_connection.get_all_vpcs():
                    # BUG FIX: the original indexed vpc.tags['Name']
                    # directly, which raised KeyError on any VPC without a
                    # Name tag and aborted the whole search — even when a
                    # later VPC matched by id. Use .get() so untagged VPCs
                    # are simply skipped.
                    vpc_name = vpc.tags.get('Name')
                    log.debug("Checking whether %s matches %s/%s",
                              self._vpc, vpc_name, vpc.id)
                    if self._vpc in (vpc_name, vpc.id):
                        self._vpc_id = vpc.id
                        if self._vpc != self._vpc_id:
                            log.debug("VPC %s matches %s",
                                      self._vpc, self._vpc_id)
                        break
                else:
                    raise VpcError('VPC %s does not exist.' % self._vpc)

            # list images to see if the connection works
            # images = self._ec2_connection.get_all_images()
            # log.debug("%d images found on cloud %s",
            #           len(images), self._ec2host)

        except Exception as e:
            log.error(
                "connection to ec2 could not be "
                "established: message=`%s`", str(e))
            raise

        self._ec2_connection, self._vpc_connection = (ec2_connection,
                                                      vpc_connection)
        return self._ec2_connection