import boto3

def deploy_instance():
    # Launch one or more EC2 instances based on interactive input.
    ec2 = boto3.resource('ec2')
    instances = ec2.create_instances(
        ImageId=input("Enter AMI ID:\n"),
        MinCount=1,
        MaxCount=int(input("How many instances do you want?\n")),
        InstanceType=input("Which type of instance do you want?\n"),
        KeyName=input("What name do you want for the key to access the server?\n")
    )
    return instances
def check_instance_count():
    # Scale the pool of EC2 workers up or down based on the job queue length.
    # Session, tQueue, tComputer, awsc and not_ are assumed to be imported
    # elsewhere in the project.
    session = Session()
    # TODO: How to check which jobs are currently running/waiting?
    num_jobs = (session.query(tQueue)
                .filter(tQueue.progress == 'something')
                .count())
    ec2 = boto3.resource('ec2')
    instances = ec2.instances.filter(Filters=[{
        'Name': 'instance-state-name',
        'Values': ['running']
    }])
    run_count = len(list(instances))
    if num_jobs / run_count > awsc.JOBS_PER_INSTANCE:
        # TODO: Where to get image id?
        ec2.create_instances(ImageId='<ami-image-id>', MinCount=1, MaxCount=5)
    elif num_jobs / run_count - 1 < awsc.JOBS_PER_INSTANCE:
        # Assumes tComputer.name holds the EC2 instance id.
        ids = [row.name for row in
               session.query(tComputer)
               .filter(not_(tComputer.name.in_(awsc.LOCAL_MACHINES)))]
        # TODO: Remove ids based on some criteria - don't stop them all.
        # Could also filter on that criteria in the query instead.
        ec2.instances.filter(InstanceIds=ids).stop()
        ec2.instances.filter(InstanceIds=ids).terminate()
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 28 15:24:33 2021

@author: ALI
"""
import boto3
from botocore.client import Config

ACCESS_KEY_ID = ''
ACCESS_SECRET_KEY = ''
BUCKET_NAME = 'bucketali'

# Upload a local image to S3 using explicit credentials and SigV4 signing.
data = open('img104.jpg', 'rb')
s3 = boto3.resource('s3',
                    aws_access_key_id=ACCESS_KEY_ID,
                    aws_secret_access_key=ACCESS_SECRET_KEY,
                    config=Config(signature_version='s3v4'))
s3.Bucket(BUCKET_NAME).put_object(Key='img104.jpg', Body=data,
                                  ContentType='image/jpeg')
print("Done")
import boto3
from datetime import datetime
from msvcrt import getch

autoS_client = boto3.client('autoscaling')  # create an Auto Scaling client
ec2 = boto3.resource('ec2')
cloudwatch = boto3.client('cloudwatch')

with open("startup.sh", 'r') as f:
    script = f.read()

keyname = "Cloud_Lab0"  # key pair name from console

print("\nCreating Instance...\n")
instances = ec2.create_instances(
    ImageId='ami-07c8bc5c1ce9598c3',  # AMI of the instance
    InstanceType='t2.micro',
    KeyName=keyname,
    UserData=script,
    SecurityGroups=['boto3users'],
    MinCount=1,
    MaxCount=1)
instance = instances[0]

print("\nCreating Launch Configuration for Autoscaling...\n")
launch_config = autoS_client.create_launch_configuration(
    LaunchConfigurationName="lab4_as_launch_config",
    ImageId='ami-07c8bc5c1ce9598c3',  # AMI of the instance
    InstanceType='t2.micro',
    KeyName=keyname,
    SecurityGroups=['boto3users'],
    UserData=script)  # closing arguments assumed; the original snippet is cut off here
import boto3
import boto.cloudformation

# List all EC2 instances with their Name and CloudFormation stack tags.
ec2 = boto3.resource("ec2")
for i in ec2.instances.all():
    name = stack_name = None
    for tag in i.tags:
        if 'Name' in tag['Key']:
            name = tag['Value']
        if 'aws:cloudformation:stack-name' in tag['Key']:
            stack_name = tag['Value']
    print(i.id, i.state['Name'], i.platform, i.launch_time, name, stack_name)

# List CloudFormation stacks in eu-west-1 (uses the legacy boto library).
cfn = boto.cloudformation.connect_to_region("eu-west-1")
for stack in cfn.describe_stacks():
    print(stack.stack_name, stack.stack_id, stack.stack_status,
          stack.creation_time, stack.LastUpdatedTime)
import json

import boto3

def read_from_s3():
    # Fetch a JSON object from S3 and parse it.
    obj = boto3.resource('s3').Object('lmtd-class', 'heyshefan.json')
    print(type(obj))
    data = json.load(obj.get()['Body'])
    print(data)
import boto3

def start_instance():
    # Start one or more stopped EC2 instances by id.
    instances = input("Enter the ids of the instances that you want to start: ")
    ids = instances.split()
    ec2 = boto3.resource('ec2')
    ec2.instances.filter(InstanceIds=ids).start()
import boto3

def main():
    ec2_ob = boto3.resource("ec2", region_name="us-east-1")
    db_group = get_hosts(ec2_ob, 'db')  # get_hosts is defined elsewhere in the project
#!/usr/bin/python
# Importing libraries
import boto3
import sys

# Reading command-line arguments to obtain the required details
ec2id = sys.argv[1]  # instance id
reg = sys.argv[2]    # AWS region

# Creating an EC2 client and resource through boto3
client = boto3.client('ec2', region_name=reg)
ec2 = boto3.resource('ec2', region_name=reg)

# Describe every security group attached to the given instance
all_sg_ids = [sg['GroupId'] for sg in ec2.Instance(ec2id).security_groups]
for secid in all_sg_ids:
    result = client.describe_security_groups(GroupIds=[secid])
    print(result)