def create_box():
    """Launch NODE_COUNT instances, tag them, wait until they are running
    and pass the system reachability check, then return the new boto
    Instance objects (each with a public DNS name assigned)."""
    old_ids = set(i.id for i in ec2.get_only_instances())
    machine = ec2.run_instances(AMI_ID, key_name=KEYNAME,
                                min_count=NODE_COUNT, max_count=NODE_COUNT,
                                security_groups=[SECURITY_GROUP, ],
                                instance_type=os.environ.get('EC2_INSTANCE_TYPE', 'm3.medium'))
    new_instances = [i for i in ec2.get_only_instances() if i.id not in old_ids]
    for new_instance in new_instances:
        print("new instance:", new_instance.id)
        ec2.create_tags([new_instance.id], {"billingProject": "aei"})
    is_running = [False] * len(new_instances)
    while not all(is_running):
        # BUG FIX: the check previously read `new_instance` (the leftover
        # variable from the tagging loop above) instead of the enumerate
        # loop variable, so only the last instance's state was ever tested.
        for count, newinstance in enumerate(new_instances):
            is_running[count] = newinstance.state == u'running'
        time.sleep(3)
        for new_instance in new_instances:
            new_instance.update()
    is_reachable = [False] * len(new_instances)
    while not all(is_reachable):
        instance_ids = [new_instance.id for new_instance in new_instances]
        inst_statuses = ec2.get_all_instance_status(instance_ids=instance_ids)
        # BUG FIX: the reachability test was inverted (`!= 'passed'`), which
        # made the loop exit while instances were still unreachable.
        is_reachable = [inst_status.system_status.details['reachability'] == 'passed'
                        for inst_status in inst_statuses]
        time.sleep(3)
    time.sleep(1)
    for new_instance in new_instances:
        assert new_instance.public_dns_name
        print("public dns name:", new_instance.public_dns_name)
    return new_instances
def create_volume(volume_size, device_name): ec2 = ec2 = boto.ec2.connect_to_region(region) azone = ec2.get_only_instances()[0].placement volume = ec2.create_volume(volume_size, azone) while volume.status != 'available': print '#' time.sleep(5) volume.update() instance_id = ec2.get_only_instances()[0].id if instance_id == u'i-6b46fc53': volume.attach(instance_id, device_name)
def create_box():
    """Spin up a single m3.2xlarge instance, wait until it is running and
    passes the reachability check, and return the boto Instance object."""
    existing = set(inst.id for inst in ec2.get_only_instances())
    machine = ec2.run_instances(
        AMI_ID,
        key_name=KEYNAME,
        security_groups=[os.environ.get('SECURITY_GROUP', 'memex-explorer-prod'), ],
        instance_type='m3.2xlarge')
    new_instance = [inst for inst in ec2.get_only_instances()
                    if inst.id not in existing][0]
    print(new_instance.id)
    # First wait for EC2 to report the running state.
    while new_instance.state != u'running':
        time.sleep(3)
        new_instance.update()
    # Then wait for the system reachability status check to pass.
    while ec2.get_all_instance_status(
            instance_ids=[new_instance.id])[0].system_status.details['reachability'] != 'passed':
        time.sleep(3)
    time.sleep(1)
    assert new_instance.public_dns_name
    print(new_instance.public_dns_name)
    return new_instance
def get_instance():
    """Return (and cache in the module global `_instance`) the
    boto.ec2.Instance object for the instance this code runs on.

    Raises:
        ValueError: when the EC2 connection fails or the lookup does not
            yield exactly one instance.
    """
    # FIX: `global _instance` was declared twice; one declaration suffices.
    global _instance
    try:
        # EAFP: the global does not exist until the first successful lookup.
        return _instance
    except NameError:
        region = get_region()
        instance_id = get_instance_id()
        ec2 = boto.ec2.connect_to_region(region)
        if ec2 is None:
            raise ValueError("Unable to connect to EC2 endpoint in region %r" % (region, ))
        instances = ec2.get_only_instances([instance_id])
        if len(instances) == 0:
            raise ValueError("Could not find instance id %r" % (instance_id, ))
        if len(instances) > 1:
            raise ValueError("Multiple instances returned for instance id %r" % (instance_id, ))
        _instance = instances[0]
        return _instance
def printStatusInstance(ec2Id): print "Checking instance status:" ec2 = boto.ec2.connect_to_region(region, aws_access_key_id=accessKeyId, aws_secret_access_key=accessKeySecret) monitor = ec2.get_only_instances(instance_ids=ec2Id) for instance in monitor: print instance.id + ' : ' + instance.state sys.exit(0)
def get_stack_instance_details(stack):
    """Return a list of {'instance_id', 'private_ip', 'launch_time', 'role'}
    dicts for the CloudFormation stack's instances, where 'role' is MASTER
    when the ELB reports InService and REPLICA otherwise; sorted by role
    then instance id."""
    global ec2
    global elb_conn
    stack_instances = ec2.get_only_instances(
        filters={'tag:aws:cloudformation:stack-id': stack.stack_id})
    health_reports = elb_conn.describe_instance_health(stack.stack_name)
    # Index the health reports by instance id (keeping every report, so an
    # instance with multiple reports still yields one entry per report,
    # matching the original nested-loop behavior).
    reports_by_id = {}
    for report in health_reports:
        reports_by_id.setdefault(report.instance_id, []).append(report)
    details = []
    for inst in stack_instances:
        for report in reports_by_id.get(inst.id, []):
            details.append({
                'instance_id': inst.id,
                'private_ip': inst.private_ip_address,
                'launch_time': parse_time(inst.launch_time),
                'role': 'MASTER' if report.state == 'InService' else 'REPLICA',
            })
    details.sort(key=lambda d: (d['role'], d['instance_id']))
    return details
def _get_instance(self):
    """Return the boto.ec2 Instance object for self.instance_id."""
    try:
        conn = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params)
    except (boto.exception.NoAuthHandlerFound, AnsibleAWSError) as e:
        # Abort the Ansible module run on connection/auth failure.
        self.module.fail_json(msg=str(e))
    found = conn.get_only_instances(instance_ids=[self.instance_id])
    return found[0]
def get_url(app_name, env_name):
    """Return the public URL for an app deployment: the S3 website bucket
    if one exists, else the load balancer's DNS name, else the single
    deployed running instance's public DNS name (None unless exactly one
    such instance exists)."""
    protocol = 'http://'  # TODO detect if load balancer supports HTTPS
    app = App(env_name, app_name)
    bucket_name = '{}-{}'.format(
        config.get('system_name', uuid.uuid1().hex), app.repo.name)
    ec2 = boto.connect_ec2()
    elb = boto.connect_elb()
    s3 = boto.connect_s3()
    bucket = s3.lookup(bucket_name)
    if bucket is not None:
        return protocol + bucket.get_website_endpoint()
    lb = None
    try:
        found = elb.get_all_load_balancers(
            load_balancer_names=['{}-{}'.format(app.env_name, app.repo.name)])
        if len(found) > 0:
            lb = found[0]
    except boto.exception.BotoServerError:
        # No balancer with that name — fall back to instance lookup.
        pass
    if lb is not None:
        return protocol + lb.dns_name
    instances = ec2.get_only_instances(
        filters={
            'tag:app': app.name,
            'tag:deployed': 'true',
            'instance-state-name': 'running'
        })
    if len(instances) != 1:
        return None
    return protocol + instances[0].public_dns_name
def get_url(app_name, env_name):
    """Resolve the public URL of a deployment: prefer an S3 website bucket,
    then a load balancer, then a lone running tagged instance (returns
    None when there is not exactly one instance and no other endpoint)."""
    protocol = 'http://'  # TODO detect if load balancer supports HTTPS
    app = App(env_name, app_name)
    bucket_name = '{}-{}'.format(config.get('system_name', uuid.uuid1().hex),
                                 app.repo.name)
    ec2_conn = boto.connect_ec2()
    elb_conn = boto.connect_elb()
    s3_conn = boto.connect_s3()
    website_bucket = s3_conn.lookup(bucket_name)
    if website_bucket is not None:
        return protocol + website_bucket.get_website_endpoint()
    balancer = None
    try:
        result = elb_conn.get_all_load_balancers(
            load_balancer_names=['{}-{}'.format(app.env_name, app.repo.name)])
        balancer = result[0] if len(result) > 0 else None
    except boto.exception.BotoServerError:
        pass
    if balancer is None:
        running = ec2_conn.get_only_instances(
            filters={'tag:app': app.name,
                     'tag:deployed': 'true',
                     'instance-state-name': 'running'})
        if len(running) != 1:
            return None
        return protocol + running[0].public_dns_name
    return protocol + balancer.dns_name
def whoAmI():
    """Copy any tags from this instance's spot request onto the instance
    itself (without overwriting existing tags), then return the instance's
    tag dict."""
    # SECURITY FIX: an AWS access key pair was hard-coded in source control.
    # Read credentials from the environment instead — and rotate the leaked
    # key pair immediately.
    ec2 = boto.ec2.connect_to_region(
        "us-west-1",
        aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
        aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"))
    # Instance metadata service gives us our own instance id.
    myID = requests.get("http://169.254.169.254/latest/meta-data/instance-id",
                        timeout=1.0).text
    instance = ec2.get_only_instances([myID])[0]
    spotRequestId = instance.spot_instance_request_id
    if spotRequestId is not None:
        spotRequest = ec2.get_all_spot_instance_requests([spotRequestId])[0]
        tags = spotRequest.tags
        for key, value in tags.items():
            if key not in instance.tags:
                instance.add_tag(key, value)
    # Re-fetch to pick up the tags just added.
    return ec2.get_only_instances([myID])[0].tags
def process_region(region):
    """Scan one region's instances, images, volumes and snapshots, wrapping
    each resource in its accessor type and dispatching to the matching
    process_* handler."""
    print('Processing region %s' % region.name)
    conn = region.connect()
    print('Scanning instances...\n')
    for inst in conn.get_only_instances():
        process_instance(Instance(inst))
    print('\nScanning images...\n')
    for image in conn.get_all_images(None, ('self')):
        process_image(Image(image))
    print('\nScanning volumes...\n')
    for volume in conn.get_all_volumes():
        process_volume(Volume(volume))
    print('\nScanning snapshots...\n')
    for snapshot in conn.get_all_snapshots(owner='self'):
        process_snapshot(Snapshot(snapshot))
def get_instance():
    """Return (and cache in the module global `_instance`) the
    boto.ec2.Instance object for the instance this code runs on.

    Raises:
        ValueError: on connection failure or when the lookup does not
            yield exactly one instance.
    """
    # FIX: removed the duplicated `global _instance` declaration.
    global _instance
    try:
        # EAFP: the global does not exist until the first successful lookup.
        return _instance
    except NameError:
        region = get_region()
        instance_id = get_instance_id()
        ec2 = boto.ec2.connect_to_region(region)
        if ec2 is None:
            raise ValueError("Unable to connect to EC2 endpoint in region %r" % (region,))
        instances = ec2.get_only_instances([instance_id])
        if len(instances) == 0:
            raise ValueError("Could not find instance id %r" % (instance_id,))
        if len(instances) > 1:
            raise ValueError("Multiple instances returned for instance id %r" % (instance_id,))
        _instance = instances[0]
        return _instance
def get_instance_by_id(instance_id, region):
    """Return the boto.ec2.Instance object with id `instance_id`."""
    conn = boto.ec2.connect_to_region(region)
    matches = conn.get_only_instances(instance_ids=[instance_id])
    return matches[0]
def get_stack_instance_details(stack):
    """Build a sorted list of instance-detail dicts for a CloudFormation
    stack; 'role' is MASTER for ELB-InService instances, REPLICA otherwise."""
    global ec2
    global elb_conn
    members = ec2.get_only_instances(
        filters={'tag:aws:cloudformation:stack-id': stack.stack_id})
    health = elb_conn.describe_instance_health(stack.stack_name)
    result = list()
    for member in members:
        for state in health:
            if state.instance_id != member.id:
                continue
            entry = {'instance_id': member.id,
                     'private_ip': member.private_ip_address,
                     'launch_time': parse_time(member.launch_time)}
            entry['role'] = 'MASTER' if state.state == 'InService' else 'REPLICA'
            result.append(entry)
    result.sort(key=lambda k: (k['role'], k['instance_id']))
    return result
def process_region(region): print "Processing region %s" % region.name ec2 = region.connect() print "Scanning instances...\n" instances = ec2.get_only_instances() for inst in instances: inst_accessor = Instance(inst) process_instance(inst_accessor) print "\nScanning images...\n" images = ec2.get_all_images(None, ("self")) for image in images: image_accessor = Image(image) process_image(image_accessor) print "\nScanning volumes...\n" volumes = ec2.get_all_volumes() for volume in volumes: volume_accessor = Volume(volume) process_volume(volume_accessor) print "\nScanning snapshots...\n" snapshots = ec2.get_all_snapshots(owner="self") for snapshot in snapshots: process_snapshot(Snapshot(snapshot))
def add_record(id):
    """Create or update a weighted Route53 CNAME record for instance `id`.

    Reads a JSON 'dns' tag ({'record': ..., 'zone': ...}) from the instance
    and points <record>.<zone> at the instance's public DNS name, using the
    instance id as the weighted record-set identifier (weight 1).

    Raises:
        AutorouteError: when the 'dns' tag is missing or unparsable.
    """
    # queue the route53 addition (may take time for ip address to get assigned)
    instance = ec2.get_only_instances([id])[0]
    # Only proceed once EC2 has actually assigned a public DNS name.
    if instance.public_dns_name is not None \
            and len(instance.public_dns_name) > 0:
        dns_name = instance.public_dns_name
        try:
            # get the tag data from the instance
            d = json.loads(instance.tags['dns'])
            name = "%s.%s" % (d['record'], d['zone'])
        except:
            raise AutorouteError("unable to get dns information")
        zone = r53.get_zone(d['zone'])
        # find_records(..., all=True) may return None, a single record,
        # or a list of records — each shape is handled below.
        records = zone.find_records(name, 'CNAME', all=True)
        # add DNS record
        # is this a list, or just one.
        if records is None:
            # straight up add
            logger.debug('no matching records, add')
            pass
        elif isinstance(records, list):
            for r in records:
                if r.identifier == id:
                    # is dns the same
                    if dns_name in r.resource_records:
                        logger.debug('found matching record, skip')
                        return
                    else:
                        # update or delete.
                        logger.debug('found matching record, update')
                        zone.update_record(r, dns_name, new_identifier=(id, 1))
                        return
        elif records.identifier == id:
            # is dns the same
            if dns_name in records.resource_records:
                logger.debug('found matching record, skip')
                return
            else:
                logger.debug('found matching record, update')
                zone.update_record(records, dns_name, new_identifier=(id, 1))
                return
        # this is a new record
        logger.debug(
            'adding record %s CNAME %s, (%s,1)' % (name, dns_name, id))
        zone.add_record(
            "CNAME", name, dns_name, ttl=60, identifier=(id, 1),
            comment='autoscaling group increased')
        return
def all(self):
    """Return (and memoize in self._instances) every instance matching
    self._filters across all configured regions."""
    if self._instances is None:
        found = []
        for region_name in self._regions:
            conn = cxn.ec2(region_name)
            found.extend(conn.get_only_instances(filters=self._filters))
        self._instances = found
    return self._instances
def manage_instances(profile):
    """Manager instances findable by AWS API

    Args:
        profile: dict with 'name', 'region', 'id' and 'key' — the AWS
            account/credentials to scan for schedule-tagged instances.
    """
    logger.info("Checking %s, %s" % (profile['name'], profile['region']))
    ec2 = boto.ec2.connect_to_region(profile['region'],
                                     aws_access_key_id=profile['id'],
                                     aws_secret_access_key=profile['key'])
    # AWS is case sensitive, so check for the common cases manually
    filters = {
        'tag-key': ['Schedule', 'schedule'],
        'instance-state-name': ['running', 'stopped']
    }
    scheduled_instances = ec2.get_only_instances(filters=filters)
    parsed_instances = parse_instances(scheduled_instances)
    # Truncate "now" to the current minute for cron comparisons.
    base = datetime.now()
    base = base.replace(second=0, microsecond=0)
    to_start = []
    to_stop = []
    for instance in parsed_instances:
        try:
            ignore = False
            should_be_running = False
            for cron_schedule in instance.cron_schedules:
                cron_schedule = cron_schedule.strip()
                if bool(cron_schedule):
                    logger.debug("%sChecking schedule: %s" % (CHILD_INDENT, cron_schedule))
                    next_run = croniter(cron_schedule, base).get_next(datetime)
                    logger.debug("%sNext run: %s" % (CHILD_INDENT, next_run))
                    gap = (next_run - base).total_seconds()
                    # A next-run within TIME_EPSILON of now means the
                    # schedule is currently firing.
                    if gap < TIME_EPSILON:
                        should_be_running = True
                        logger.debug("%s%s (%s) - Fire it up!" % (
                            CHILD_INDENT, instance.name, instance.instance_id))
                else:
                    # NOTE(review): an empty schedule entry marks the whole
                    # instance as ignored — confirm this reconstruction of
                    # the (flattened) original nesting.
                    ignore = True
            if not ignore:
                if should_be_running:
                    if instance.state == 'stopped':
                        to_start.append(instance.instance_id)
                elif instance.state == 'running':
                    to_stop.append(instance.instance_id)
                    logger.debug("%s%s (%s) - That's all folks!" % (
                        CHILD_INDENT, instance.name, instance.instance_id))
        except Exception as e:
            logger.error("%s%s" % (CHILD_INDENT, e))
    start_instances(ec2, to_start)
    stop_instances(ec2, to_stop)
def get_cluster_instances(group_name):
    """Return boto Instance objects for every member of the autoscaling
    group `group_name`; [] when the group has no members."""
    conn = get_aws_connection('ec2')
    group = get_autoscaling_group(group_name)
    ids = [member.instance_id for member in group.instances]
    if not ids:
        return []
    return conn.get_only_instances(instance_ids=ids)
def get_info(app_name, env_name):
    """Print deployment details for an app and return per-instance data.

    When no load balancer exists, prints DNS/SSH info for each deployed
    running instance and returns the list of data dicts (exits with
    status 1 if nothing is deployed). When a load balancer exists, prints
    its details plus the health/DNS/SSH of each registered instance.
    """
    ec2 = boto.connect_ec2()
    elb = boto.connect_elb()
    lb = None
    app = App(env_name, app_name)
    try:
        lbresult = elb.get_all_load_balancers(load_balancer_names=['{}-{}'.format(env_name, app_name)])
        lb = lbresult[0] if len(lbresult) > 0 else None
    except boto.exception.BotoServerError:
        # No balancer by that name — fall through to the instance path.
        pass
    if lb is None:
        instances = ec2.get_only_instances(filters={'tag:app': '{}/{}'.format(app.env_name, app.repo.name), 'tag:deployed': 'true', 'instance-state-name': 'running'})
        datas = [{'instance_id': instance.id, 'public_dns_name': instance.public_dns_name, 'ssh_command': get_ssh_command(instance.id)} for instance in instances]
        for data in datas:
            print '-----> Instance {}'.format(data['instance_id'])
            print '       DNS: {}'.format(data['public_dns_name'])
            print '       SSH: {}'.format(data['ssh_command'])
        if len(datas) == 0:
            print 'No deployment found'
            sys.exit(1)
        else:
            return datas
    print '-----> Load Balancer'
    print '       Name: {}'.format(lb.name)
    print '       DNS: {}'.format(lb.dns_name)
    i = 0
    for inst in lb.get_instance_health():
        i += 1
        print '-----> Instance #{}'.format(i)
        inst_id = inst.instance_id
        print '       ID: {}'.format(inst_id)
        print '       State: {}'.format(inst.state)
        instance = ec2.get_only_instances(instance_ids=[inst_id])[0]
        print '       DNS: {}'.format(instance.public_dns_name)
        print '       SSH: {}'.format(get_ssh_command(inst_id))
    if i == 0:
        print '-----> Instances'
        print '       None'
def get_info(app_name, env_name):
    """Print deployment details for an app environment.

    Without a load balancer: prints DNS/SSH for each deployed running
    instance tagged `<env>/<app>` (exits 1 when none found). With a load
    balancer: prints its name/DNS and the health, DNS and SSH command of
    every registered instance.
    """
    ec2 = boto.connect_ec2()
    elb = boto.connect_elb()
    lb = None
    try:
        lbresult = elb.get_all_load_balancers(
            load_balancer_names=['{}-{}'.format(env_name, app_name)])
        lb = lbresult[0] if len(lbresult) > 0 else None
    except boto.exception.BotoServerError:
        # No balancer by that name — fall through to the instance path.
        pass
    if lb is None:
        instances = ec2.get_only_instances(
            filters={
                'tag:Name': '{}/{}'.format(env_name, app_name),
                'tag:deployed': 'true',
                'instance-state-name': 'running'
            })
        if len(instances) > 0:
            for instance in instances:
                print '-----> Instance {}'.format(instance.id)
                print '       DNS: {}'.format(instance.public_dns_name)
                print '       SSH: {}'.format(get_ssh_command(instance.id))
            return
        else:
            print 'No deployment found'
            sys.exit(1)
    print '-----> Load Balancer'
    print '       Name: {}'.format(lb.name)
    print '       DNS: {}'.format(lb.dns_name)
    i = 0
    for inst in lb.get_instance_health():
        i += 1
        print '-----> Instance #{}'.format(i)
        inst_id = inst.instance_id
        print '       ID: {}'.format(inst_id)
        print '       State: {}'.format(inst.state)
        instance = ec2.get_only_instances(instance_ids=[inst_id])[0]
        print '       DNS: {}'.format(instance.public_dns_name)
        print '       SSH: {}'.format(get_ssh_command(inst_id))
    if i == 0:
        print '-----> Instances'
        print '       None'
def instance_info(region):
    """Map each running instance's id to {'region', 'ami_id'} plus
    'ami_desc' when the AMI can still be described."""
    conn = boto.ec2.connect_to_region(region)
    info = {}
    running = conn.get_only_instances(
        filters={'instance-state-name': 'running'})
    for inst in running:
        info[inst.id] = {'region': region, 'ami_id': inst.image_id}
        images = conn.get_all_images(inst.image_id)
        if images:
            info[inst.id]['ami_desc'] = images[0].description
    return info
def kill_instances(ec2, keyname, min_age, dry_run): now = datetime.utcnow() instances = ec2.get_only_instances(filters={'key-name': keyname}) kill_list = [] for instance in instances: launchtime = datetime.strptime(instance.launch_time, '%Y-%m-%dT%H:%M:%S.%fZ') if (now - launchtime) > min_age: print "Going to kill", instance.id, "launched", instance.launch_time kill_list.append(instance.id) if not dry_run: ec2.terminate_instances(kill_list, dry_run)
def instances_by_user(args, ec2, user_set=None):
    """Group live (not terminated/shutting-down) instances carrying a
    'saved_for_user' tag by that tag's value; optionally restrict to the
    users in `user_set`."""
    result = {}
    saved_tags = ec2.get_all_tags({'resource_type': 'instance',
                                   'key': 'saved_for_user'})
    for tag in saved_tags:
        owner = tag.value
        if user_set and owner not in user_set:
            continue
        inst = ec2.get_only_instances(instance_ids=[tag.res_id])[0]
        if inst.state == 'terminated' or inst.state == 'shutting-down':
            continue
        result.setdefault(owner, []).append(inst)
    return result
def show_co(region):
    """Append each instance's repr and console output to 'ins.log'."""
    conn = boto.ec2.connect_to_region(region)
    for inst in conn.get_only_instances():
        console = inst.get_console_output()
        with open('ins.log', 'a+') as log:
            log.write(str(inst) + '\n')
            log.write(console.output)
def get_dns_names(location, placement_group='*', instance_type='*'):
    """Return {'public': [...], 'private': [...]} DNS names of running
    instances matching the placement-group / instance-type filters."""
    conn = boto.ec2.connect_to_region(location)
    matching = conn.get_only_instances(filters={
        "placement-group-name": placement_group,
        "instance-type": instance_type
    })
    running = [inst for inst in matching if inst.state == 'running']
    return {
        "public": [inst.public_dns_name for inst in running],
        "private": [inst.private_dns_name for inst in running],
    }
def get_ip_addresses(location, placement_group='*', instance_type='*'):
    """Return {'public': [...], 'private': [...]} IP addresses of running
    instances matching the placement-group / instance-type filters."""
    conn = boto.ec2.connect_to_region(location)
    matching = conn.get_only_instances(filters={
        "placement-group-name": placement_group,
        "instance-type": instance_type
    })
    running = [inst for inst in matching if inst.state == 'running']
    return {
        "public": [inst.ip_address for inst in running],
        "private": [inst.private_ip_address for inst in running],
    }
def sync_group():
    """(Incomplete stub) Look up the autoscaling group of a hard-coded
    instance and iterate its members; the per-member Route53 sync work
    is still TODO."""
    # NOTE(review): instance id is hard-coded — presumably a development
    # placeholder; confirm before relying on this function.
    id = "i-f4bd6ea9"
    i = ec2.get_only_instances([id])[0]
    g = i.tags['aws:autoscaling:groupName']
    s = autoscale.get_all_groups(names=[g])[0]
    # Body intentionally does nothing yet — planned steps preserved below.
    for ai in s.instances:
        #ai_id = ai.instance_id
        # get instance
        # get public dns
        # check r53
        pass
def get_all_group_instances_and_conn():
    """Return (instances, conn): boto Instance objects for every member of
    the hard-coded SC autoscale groups, plus the AutoScaleConnection used.
    Exits the process when no members exist."""
    conn = AutoScaleConnection()
    # Stash the connection in a module global for other code to reuse.
    global autoscale_conn
    autoscale_conn = conn
    ec2 = boto.ec2.connect_to_region('us-east-1')
    groups = conn.get_all_groups(
        names=['SCCluster1', 'SCCluster2', 'SCCluster3', 'SCCluster4'])  # TODO: update this list
    # NOTE(review): this iterates each group object directly; the similar
    # function elsewhere in this file iterates `group.instances` — confirm
    # boto autoscale groups are iterable this way.
    instances = [instance for group in groups for instance in group]
    if not instances:
        sys.exit()
    instance_ids = [instance.instance_id for instance in instances]
    # Re-resolve the autoscale membership into full EC2 Instance objects.
    instances = ec2.get_only_instances(instance_ids)
    return instances, conn
def main(): ec2 = boto.ec2.connect_to_region('us-west-2') instances = ec2.get_only_instances( filters={'instance-state-name': 'running'}) #filters={'tag:runner': "true"}) print json.dumps([{ "id": instance.id, "public": instance.public_dns_name, "private": instance.private_dns_name, "placement": instance.placement, "ip": instance.ip_address, } for instance in sorted(instances, key=lambda i: parser.parse(i.launch_time))], indent=1)
def get_consul_instances(region): """Return list of instance objects for all consul instances in this account/region""" all_instances = ec2.get_only_instances() consul_servers = [] # filter for only the consul instances print "Looking for Consul Server instances..." for i in all_instances: if i.tags['Name'] == 'consul-server' and i.state == 'running': print " Consul Server:", i consul_servers.append(i) return consul_servers
def wait_for_instances_to_start(location, num, placement_group='*'): ec2 = boto.ec2.connect_to_region(location) ready_count = 0 while (True): ready_count = 0 instances = ec2.get_only_instances( filters={"placement-group-name": placement_group}) for inst in instances: if inst.state == 'running': ready_count += 1 print "-> number of ready instances: " + str( ready_count) + " (out of " + str(num) + ") ..." if (ready_count >= num): break time.sleep(2)
def autoscale_group_hosts(group_name): import boto.ec2 from boto.ec2.autoscale import AutoScaleConnection ec2 = boto.connect_ec2() conn = AutoScaleConnection() groups = conn.get_all_groups(names=[]) groups = [ group for group in groups if group.name.startswith(group_name) ] instance_ids = [] instances = [] for group in groups: print group.name instance_ids.extend([i.instance_id for i in group.instances]) instances.extend(ec2.get_only_instances(instance_ids)) return [i.private_ip_address for i in instances], instances[0].id, instances[0].tags.get("aws:autoscaling:groupName")
def get_all_group_instances_and_conn(groups_names=None):
    """Pick a random autoscale group from `groups_names` and return
    (its boto Instance objects, the AutoScaleConnection). Uploads logs and
    exits when the chosen group has no instances.

    FIX: the default argument was `get_autoscale_groups()['groups']`, a
    function call evaluated once at import time (stale data plus an
    import-time API call). It is now fetched lazily when no argument is
    given — callers passing an explicit list are unaffected.
    """
    if groups_names is None:
        groups_names = get_autoscale_groups()['groups']
    conn = AutoScaleConnection()
    global autoscale_conn
    autoscale_conn = conn
    ec2 = boto.ec2.connect_to_region('us-east-1')
    selected_group_name = random.choice(groups_names)
    logger.info('Selected autoscale group: %s' % selected_group_name)
    group = conn.get_all_groups(names=[selected_group_name])[0]
    if not group.instances:
        logger.info("No working instances in selected group %s" % selected_group_name)
        upload_logs_to_s3()
        sys.exit()
    instance_ids = [i.instance_id for i in group.instances]
    instances = ec2.get_only_instances(instance_ids)
    return instances, conn
def get(account_name, region):
    """Yield one IP string per addressable instance in the account/region:
    the public IP normally, but the private IP when querying the local
    region (config.region)."""
    log.info('retrieving instances for %s %s' % (account_name, region))
    account = config.accounts[account_name]
    conn = connect_ec2(account, region)
    for inst in conn.get_only_instances():
        public = getattr(inst, 'ip_address', None)
        if not public:
            # Skip instances without a public address entirely.
            continue
        if region == config.region:
            yield str(inst.private_ip_address)
        else:
            yield str(public)
def autoscale_group_hosts(group_name): import boto.ec2 from boto.ec2.autoscale import AutoScaleConnection ec2 = boto.connect_ec2() conn = AutoScaleConnection() groups = conn.get_all_groups(names=[]) groups = [group for group in groups if group.name.startswith(group_name)] instance_ids = [] instances = [] for group in groups: print "group name:", group.name instance_ids.extend([i.instance_id for i in group.instances]) instances.extend(ec2.get_only_instances(instance_ids)) return set([ i.private_ip_address for i in instances ]), instances[0].id, instances[0].tags.get("aws:autoscaling:groupName")
def manage_instances(region):
    """Start or stop instances in `region` according to their cron
    'Schedule' tag.

    Args:
        region: region name to query for instances
    """
    logging.debug("Checking %s" % region)
    ec2 = boto.ec2.connect_to_region(region)
    # AWS is case sensitive, so check for the common cases manually
    filters = {
        'tag-key': ['Schedule', 'schedule'],
        'instance-state-name': ['running', 'stopped']
    }
    scheduled_instances = ec2.get_only_instances(filters=filters)
    # BUG FIX: previously called parse_instances(ec2) — passing the
    # connection object instead of the fetched instances, leaving
    # scheduled_instances unused (compare the sibling implementation
    # of this function in this file).
    parsed_instances = parse_instances(scheduled_instances)
    # Truncate "now" to the current minute for cron comparisons.
    base = datetime.now()
    base = base.replace(second=0, microsecond=0)
    to_start = []
    to_stop = []
    for instance in parsed_instances:
        should_be_running = False
        for cron_schedule in instance.cron_schedules:
            logger.debug("Checking schedule: %s" % cron_schedule)
            next_run = croniter(cron_schedule, base).get_next(datetime)
            logger.debug("Next run: %s" % next_run)
            gap = (next_run - base).total_seconds()
            if gap < TIME_EPSILON:
                should_be_running = True
                logger.debug("%s (%s) - Fire it up!" % (instance.name, instance.instance_id))
        if should_be_running:
            if instance.state == 'stopped':
                to_start.append(instance.instance_id)
        elif instance.state == 'running':
            to_stop.append(instance.instance_id)
            logger.debug("%s (%s) - That's all folks!" % (instance.name, instance.instance_id))
    start_instances(ec2, to_start)
    stop_instances(ec2, to_stop)
def load_hosts_list():
    """Populate the module-level `hosts` list with every instance whose
    system and instance status checks are both 'ok'."""
    # first get a list of the id's of all the running instances.
    # FIX: collect healthy ids into a set — the original used a list plus a
    # nested loop over (instances x ids), i.e. an accidental O(n*m) scan.
    healthy_ids = set()
    for status in ec2.get_all_instance_status():
        if status.system_status.status == 'ok' and status.instance_status.status == 'ok':
            healthy_ids.add(status.id)
    # now get all the instances and keep the healthy ones.
    for instance in ec2.get_only_instances():
        if instance.id in healthy_ids:
            hosts.append(instance)
    return
def manage_instances(region):
    """Start or stop instances in `region` based on their cron schedule tags.

    Args:
        region: region name to query for instances
    """
    logging.debug("Checking %s" % region)
    ec2 = boto.ec2.connect_to_region(region)
    # AWS is case sensitive, so check for the common cases manually
    filters = {
        'tag-key': ['Schedule', 'schedule'],
        'instance-state-name': ['running', 'stopped']
    }
    tagged = ec2.get_only_instances(filters=filters)
    # Compare cron schedules against "now" truncated to the minute.
    base = datetime.now().replace(second=0, microsecond=0)
    to_start = []
    to_stop = []
    for inst in parse_instances(tagged):
        due = False
        for schedule in inst.cron_schedules:
            logger.debug("Checking schedule: %s" % schedule)
            upcoming = croniter(schedule, base).get_next(datetime)
            logger.debug("Next run: %s" % upcoming)
            if (upcoming - base).total_seconds() < TIME_EPSILON:
                due = True
                logger.debug("%s (%s) - Fire it up!" % (inst.name, inst.instance_id))
        if due:
            if inst.state == 'stopped':
                to_start.append(inst.instance_id)
        elif inst.state == 'running':
            to_stop.append(inst.instance_id)
            logger.debug("%s (%s) - That's all folks!" % (inst.name, inst.instance_id))
    start_instances(ec2, to_start)
    stop_instances(ec2, to_stop)
def autoscaling_peers():
    """Return the healthy peer instances in this instance's autoscaling
    group (excluding this instance itself); [] when no healthy peers."""
    metadata = boto.utils.get_instance_identity()['document']
    autoscaling = boto.ec2.autoscale.connect_to_region(metadata['region'])
    ec2 = boto.ec2.connect_to_region(metadata['region'])
    for group in autoscaling.get_all_groups():
        # FIX: the original removed elements from group.instances while
        # iterating over that same list, which skips the element following
        # each removal; filter into a new list instead.
        peers = [i for i in group.instances
                 if i.instance_id != metadata['instanceId']]
        group.instances = peers
        instance_ids = [i.instance_id for i in peers]
        # Drop peers whose instance status check is not 'ok'.
        unhealthy = set()
        for status in ec2.get_all_instance_status(instance_ids):
            if status.instance_status.status != 'ok':
                unhealthy.add(status.id)
        instance_ids = [iid for iid in instance_ids if iid not in unhealthy]
        if not instance_ids:
            return []
        return ec2.get_only_instances(instance_ids)
def remove_record(id):
    """Delete the Route53 CNAME record(s) whose weighted-set identifier
    matches instance `id`, located via the instance's JSON 'dns' tag
    ({'record': ..., 'zone': ...}).

    Raises:
        AutorouteError: when the 'dns' tag is missing or unparsable.
    """
    instance = ec2.get_only_instances([id])[0]
    try:
        d = json.loads(instance.tags['dns'])
        name = "%s.%s" % (d['record'], d['zone'])
    except:
        raise AutorouteError("unable to get dns information")
    zone = r53.get_zone(d['zone'])
    # find_records(..., all=True) may return None, one record, or a list.
    records = zone.find_records(name, 'CNAME', all=True)
    if records is None:
        logger.debug("no records found for %s" % (name))
        return
    elif isinstance(records, list):
        for r in records:
            if r.identifier == id:
                logger.debug('deleting record %s, %s' % (name, id))
                zone.delete_record(r)
    elif records.identifier == id:
        logger.debug('deleting record %s, %s' % (name, id))
        zone.delete_record(records)
def stop_node():
    """CLI entry point: terminate every EC2 instance whose SLURMHostname
    tag matches the node name given in argv. Returns a shell-style exit
    code (0 on success, 1 on usage error or no matching instances)."""
    start_logging()
    print(" ".join(argv))
    if len(argv) != 2:
        print("Usage: %s <nodename>" % (argv[0],), file=sys.stderr)
        return 1
    nodename = argv[1]
    cc = ClusterConfiguration.from_config()
    region = get_region()
    conn = boto.ec2.connect_to_region(region)
    matching = conn.get_only_instances(filters={"tag:SLURMHostname": nodename})
    if not matching:
        print("No instances found for %r" % nodename)
        return 1
    ids = [inst.id for inst in matching]
    print("Terminating instance(s): %s" % " ".join(ids))
    conn.terminate_instances(ids)
    return 0
def stop_node():
    """Terminate the EC2 instance(s) backing the SLURM node named in argv.

    Usage: stop_node <nodename>; returns 0 on success, 1 on bad usage or
    when no instance carries the matching SLURMHostname tag."""
    start_logging()
    print(" ".join(argv))
    if len(argv) != 2:
        print("Usage: %s <nodename>" % (argv[0], ), file=sys.stderr)
        return 1
    node = argv[1]
    cc = ClusterConfiguration.from_config()
    conn = boto.ec2.connect_to_region(get_region())
    found = conn.get_only_instances(filters={"tag:SLURMHostname": node})
    if len(found) == 0:
        print("No instances found for %r" % node)
        return 1
    ids = [inst.id for inst in found]
    print("Terminating instance(s): %s" % " ".join(ids))
    conn.terminate_instances(ids)
    return 0
def instance_tags():
    """Return the tag dict of the current EC2 instance.

    Raises:
        ValueError: when the lookup does not yield exactly one instance.
    """
    ec2 = boto.ec2.connect_to_region(region())
    instances = ec2.get_only_instances(instance_ids=[instance_id()])
    # FIX: use a real exception instead of `assert`, which is silently
    # stripped when Python runs with -O.
    if len(instances) != 1:
        raise ValueError("Should have one instance with id={}, not {}".format(
            instance_id(), instances))
    return instances[0].tags
#!/usr/bin/env python
"""Print the Elastic Beanstalk environment name of the current instance,
resolved from instance-identity metadata and the instance's tags."""
import boto.utils
import boto.ec2

iid_doc = boto.utils.get_instance_identity()['document']
region = iid_doc['region']
instance_id = iid_doc['instanceId']

ec2 = boto.ec2.connect_to_region(region)
instance = ec2.get_only_instances(instance_ids=[instance_id])[0]
env = instance.tags['elasticbeanstalk:environment-name']
print(env)
def get_instance(ec2, instance_id):
    """Return the single boto Instance object matching `instance_id`."""
    matches = ec2.get_only_instances([instance_id])
    return matches[0]
def get_master_instances():
    """Return every instance tagged tcm=master."""
    conn = get_aws_connection('ec2')
    return conn.get_only_instances(filters={'tag:tcm': 'master'})
def statusInstance(ec2Id):
    """Return the state string of the first instance matching ec2Id."""
    conn = boto.ec2.connect_to_region(region,
                                      aws_access_key_id=accessKeyId,
                                      aws_secret_access_key=accessKeySecret)
    matches = conn.get_only_instances(instance_ids=ec2Id)
    return matches[0].state
"""Exploratory script: look up a hard-coded instance in eu-west-1 and exit
with status 1 when it cannot be found."""
import boto
from pprint import pprint
from boto import ec2
import sys

#instance_id = sys.argv[1]
region = 'eu-west-1'
# NOTE: rebinds the name `ec2` from the imported module to a connection.
ec2 = boto.ec2.connect_to_region(region)
instances = ec2.get_only_instances(['i-534d7bd9'])
if not instances or len(instances) == 0:
    sys.exit(1)
else: lbs = [] for lb in elbs: for info in lb.instances: if self.instance_id == info.id: lbs.append(lb) return lbs def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] def main(): argument_spec = ec2_argument_spec() argument_spec.update( dict(state={'required': True}, instance_id={'required': True}, ec2_elbs={ 'default': None, 'required': False, 'type': 'list' }, enable_availability_zone={ 'default': True, 'required': False,
asgs = asg.get_all_groups([asg_name]) if len(asg_instances) != 1: self.module.fail_json(msg="Illegal state, expected one auto scaling group.") asg_elbs = asgs[0].load_balancers return asg_elbs def _get_instance(self): """Returns a boto.ec2.InstanceObject for self.instance_id""" try: ec2 = connect_to_aws(boto.ec2, self.region, **self.aws_connect_params) except (boto.exception.NoAuthHandlerFound, StandardError), e: self.module.fail_json(msg=str(e)) return ec2.get_only_instances(instance_ids=[self.instance_id])[0] def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( state={'required': True}, instance_id={'required': True}, ec2_elbs={'default': None, 'required': False, 'type':'list'}, enable_availability_zone={'default': True, 'required': False, 'type': 'bool'}, wait={'required': False, 'default': True, 'type': 'bool'}, wait_timeout={'requred': False, 'default': 0, 'type': 'int'} ) ) module = AnsibleModule(
if "github.dcs.trend.com" in line: PrimaryIP = line.split(" ")[0] elif "github.replica.com" in line: ReplicaIP = line.split(" ")[0] else: newfile.write(line) newfile.write(ReplicaIP+" github.dcs.trend.com\n") newfile.write(PrimaryIP+" github.replica.com\n") hostfile.close() newfile.close() #stop primary node ec2 = boto.ec2.connect_to_region(awsRegion) instList = ec2.get_only_instances() for inst in instList: if inst.private_ip_address == PrimaryIP: ec2.stop_instances(inst.id) break startFailover = time.time() #replica node promoting os.system("./ghe_replica_to_primary.py "+ReplicaIP) print ("replica take place to primary %s" % ( time.time() - startFailover )) #exchange the hostname os.system("sudo mv hosts /etc/hosts")
def terminate_instances(location, placement_group='*'):
    """Terminate every instance in the given placement group."""
    conn = boto.ec2.connect_to_region(location)
    doomed = conn.get_only_instances(
        filters={"placement-group-name": placement_group})
    for inst in doomed:
        conn.terminate_instances(instance_ids=[inst.id])