def create_standby_valors(self):
    """Ensure the pool holds NUM_STANDBY_VALORS idle valors.

    Counts currently-available valors and, when the pool is short,
    launches the missing number in background threads, each calling
    ``create_and_launch_valor`` with this deployment's subnet and
    security group.
    """
    empty_valors = self.get_available_valors()

    # How many valors are missing from the standby pool.
    num_to_create = NUM_STANDBY_VALORS - len(empty_valors)
    if num_to_create <= 0:
        # Pool already holds at least the required number of standby
        # valors -- nothing to do.  (The original code still built an
        # AWS client and ran a zero-iteration loop in the == case, and
        # carried a dead `elif ...: pass` branch.)
        return

    aws = AWS()
    for _ in range(num_to_create):
        # EC2 instance startup is slow, so launch each valor in its
        # own background thread rather than serially.
        create_standby_valors_thread = threading.Thread(
            target=self.create_and_launch_valor,
            args=(
                aws.get_subnet_id(),
                aws.get_sec_group().id,
            ))
        create_standby_valors_thread.start()
def get_empty_valor(self, migration_source_valor_id=None):
    """Return an available valor node, creating one if none exist.

    When no valor is currently free, a brand new one is created and
    launched; otherwise a random free valor is chosen.  In both cases
    the valor is verified to be in the 'RUNNING' state before it is
    returned.
    """
    empty_valors = self.get_available_valors(migration_source_valor_id)

    if empty_valors:
        # Pick one of the free valors at random.
        empty_valor = random.choice(empty_valors)
        self.verify_valor_running(empty_valor['valor_id'])
    else:
        # No free valor exists: create and launch a new one in this
        # deployment's subnet / security group.
        aws = AWS()
        valor_id = self.create_and_launch_valor(aws.get_subnet_id(),
                                                aws.get_sec_group().id)
        self.verify_valor_running(valor_id)
        empty_valor = self.rethinkdb_manager.get_valor(valor_id)

    return empty_valor
def main2():
    """Fetch and print a credentials file from the techops secure store."""
    session = AWS('techops-newdev').session
    s3_client = session.client('s3')
    response = s3_client.get_object(
        Bucket='cb-techops-nonprod-secure-store',
        Key='dev/apps/cb-techops-nonprod-ert/1.0/api_credentials.ini')
    body = response['Body'].read()
    pprint(body.decode('UTF-8'))
def valor_create_pool(self, number_of_valors):
    """Create a pool of valors in this deployment's subnet and security group."""
    aws = AWS()
    subnet_id = aws.get_subnet_id()
    sec_group_id = aws.get_sec_group().id
    return self.valor_manager.create_valor_pool(
        number_of_valors, subnet_id, sec_group_id)
def main():
    """Webcam face detection + recognition loop.

    Detects faces every 15 frames, tracks them in between, and tries
    to recognize newly detected faces either locally or via AWS; any
    recognized name is sent to the server.  Press 'q' to quit.
    """
    choice = int(input("Would you like local (1) or AWS (2) recognition?: "))

    # create a tracker that uses the classifier
    tracker = Tracker()
    recognizer = Recognizer("./knownfaces/")
    aws = AWS()

    # get a threaded video stream and start it
    # wake up time for standard webcam is >= 8 seconds
    video = WebCamStream()
    video.start()

    frame_count = 0     # frames since last detection pass
    detected_count = 0  # number of faces currently accounted for
    new_count = 0       # BUG FIX: was read below before ever being assigned

    # while the 'q' key has not been pressed
    while cv2.waitKey(1) != ord('q'):
        frame = video.read()  # get current frame
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to gray

        faces = tracker.get_coordinates()  # coordinates of detected faces
        for (x, y, w, h) in faces:
            draw_rectangle(frame, x, y, w, h)  # highlight with rectangle

            # if more faces were detected since last time
            if new_count > detected_count:
                # update face count and attempt to recognize faces
                # NOTE: could have it predict on only the latest detected faces
                detected_count = new_count
                if choice == 1:  # local recognition
                    # BUG FIX: ROI slice had width/height swapped
                    # (rows span the height, columns span the width).
                    success, prediction = recognizer.predict(
                        gray_frame[y:y + h, x:x + w])
                else:  # aws recognition
                    convert = bytes(cv2.imencode('.jpg', gray_frame)[1])
                    success, prediction = aws.compare(convert)
                if success:
                    # send the predicted name to the server
                    send_name(prediction)

        # detect new faces every 15 frames
        if frame_count == 15:
            frame_count = 0
            new_count = tracker.detect_and_track(gray_frame)
            # found less faces, adjust detected
            if new_count < detected_count:
                detected_count = new_count
        else:
            # update object trackers
            tracker.update(gray_frame)

        # display frame -- this is only for testing and verification
        cv2.imshow('Image', frame)
        frame_count += 1

    # Clean up
    video.stop()
    cv2.destroyAllWindows()
def start(self):
    """Continuously poll the radio and stream link statistics to stdout.

    Puts the modem into continuous-receive mode and, twice a second,
    rewrites a single status line showing RSSI, SNR and the modem's
    rx_ongoing / modem_clear flags.  Runs until interrupted.
    """
    self.db = AWS()
    self.reset_ptr_rx()
    self.set_mode(MODE.RXCONT)
    while True:
        time.sleep(.5)
        rssi = self.get_rssi_value()
        snr = self.get_pkt_snr_value()
        modem = self.get_modem_status()
        line = "\r%d %d %d %d" % (rssi, snr, modem['rx_ongoing'],
                                  modem['modem_clear'])
        sys.stdout.flush()
        sys.stdout.write(line)
def __init__(self):
    """Initialise tagger state, metric bookkeeping and the AWS client,
    then run the remaining configuration via ``setup()``."""
    # Caches of resources discovered during a run, keyed elsewhere.
    self.instances = {}
    self.volumes = {}
    self.snapshots = {}
    # Tag filtering / per-instance required-tag configuration.
    self.filtered_tag_keys = []
    self.require_tags_instance = []
    # Prefix prepended to every emitted metric name.
    self.metric_prefix = 'resource_tagger_'
    self.metrics = {}
    self.aws = AWS()
    # Default tag-copy behaviour: source key and how to split its value
    # (separator, max parts) -- presumably consumed by setup()/tagging
    # code elsewhere; confirm against callers.
    self.tag_default_copy_key = ''
    self.tag_default_copy_split = ['-', 2]
    self.setup()
def main():
    """Grow the media volume once disk usage crosses the configured ratio.

    Checks the usage ratio of the configured mount point; when it is at
    or above the threshold, creates a new EBS volume in this instance's
    availability zone, attaches it under the next device name, and
    extends the LVM logical volume onto it.
    """
    options, args = parse_args()

    # Below threshold: nothing to do.
    if get_usage_ratio(options.mount_point) < options.ratio:
        logger.info("{0} usage ratio is less than {1}. Quitting".format(
            options.mount_point, options.ratio))
        return

    logger.info("Proceeding with extending media volume")
    aws = AWS()
    instance_info = utils.get_instance_identity()['document']

    # Create a fresh volume in the same AZ as this instance.
    volume_id = create_volume(aws.ec2, options.increment,
                              instance_info['availabilityZone'])

    # Device name derived from the highest existing partition.
    last_partition = get_sorted_partitions()[-1]
    new_device_name = '/dev/' + increment_partition_name(last_partition)

    aws.ec2.attach_volume(volume_id, instance_info['instanceId'],
                          new_device_name)
    extend_lvm(new_device_name, options.logical_group,
               options.logical_device)
def lambda_handler(event, context):
    """Collect last month's spend from every enabled cloud provider.

    A provider is enabled through a USE_<NAME> environment variable.
    The reporting window is the previous calendar month.  When
    SLACK_REPORT is set, the combined results are posted to Slack.
    Always returns 200.
    """
    secrets = Secrets()

    enabled_clouds = [
        name for name in ('aws', 'heroku', 'do', 'cloudflare')
        if os.environ.get('USE_{}'.format(name.upper()), None)
    ]

    # Previous calendar month: its last day, then its first day.
    end = last_day_of_month = date.today().replace(day=1) - timedelta(days=1)
    start = last_day_of_month.replace(day=1)

    results = []
    for cloud in enabled_clouds:
        res = {}
        if cloud == 'aws':
            res = AWS().get(start, end)
        elif cloud == 'heroku':
            res = Heroku(secrets.get('heroku')).get(start, end)
        elif cloud == 'do':
            res = DO(secrets.get('do')).get(start, end)
        elif cloud == 'cloudflare':
            res = Cloudflare(secrets.get('cloudflare')).get(start, end)
        results.append(res)

    if os.environ.get('SLACK_REPORT', False):
        from report import REPORT
        reporter = REPORT(results, secrets.get('slack'), start, end, True)
        reporter.slack()

    return 200
def get_client_materials_from_s3(s3_path, region='us-east-1', akid=None, skid=None):
    """Download and unpack a client-materials tarball from S3.

    :param s3_path: S3 location, optionally prefixed with 's3://'; the
        first path component is the bucket, the remainder is the key.
    :param region: AWS region for the connection.
    :param akid: optional AWS access key id.
    :param skid: optional AWS secret key id.
    """
    logger.debug("Getting client materials from S3")
    # Accept both 's3://bucket/key' and 'bucket/key' forms.
    if s3_path.startswith('s3://'):
        s3_path = s3_path[5:]
    path_split = s3_path.split('/')
    bucket_name = path_split[0]
    aws = AWS(region, akid, skid)
    bucket = aws.s3.get_bucket(bucket_name)
    key = bucket.get_key('/'.join(path_split[1:]))
    with TempDir() as temp_dir:
        temp_file_name = path.join(temp_dir, 'client_materials.tgz')
        # NOTE(review): the tarball is written through a text-mode
        # handle ('w+'); harmless on POSIX but should probably be
        # 'wb+' -- confirm.
        with open(temp_file_name, 'w+') as temp_file:
            key.get_contents_to_file(temp_file)
        #tf = tarfile.open(temp_file.name)
        #tf.extractall(temp_dir)
        # Unpacked with the ad-hoc helper instead of the tarfile module.
        ghetto_tar(temp_file_name)
        print os.listdir(temp_dir)
def __init__(self,
             aws_access_key_id,
             aws_secret_access_key,
             req_ratio=None,
             req_width=None,
             req_height=None,
             consideration_rate=None,
             dest=None,
             ):
    """
    Performer constructor method.

    :param aws_access_key_id: AWS access key for the uploader.
    :param aws_secret_access_key: AWS secret key for the uploader.
    :param req_ratio: the `desired` ratio of the output image
        (defaults to 1.9; falsy values are replaced by the default).
    :param req_width: desired output width (default 1330; only
        defaulted here, not stored -- confirm intended use).
    :param req_height: desired output height (default 700; only
        defaulted here, not stored -- confirm intended use).
    :param consideration_rate: the approximation tolerance applied
        around ``req_ratio`` (defaults to 0.9).
    :param dest: destination directory handed to the Downloader.
    :raises ValueError: if ``req_ratio`` or ``consideration_rate``
        is negative.
    """
    if not req_ratio:
        req_ratio = 1.9
    if not req_width:
        req_width = 1330
    if not req_height:
        req_height = 700
    if not consideration_rate:
        consideration_rate = 0.9
    self.req_ratio = req_ratio
    self.consideration_rate = consideration_rate
    self.dest = dest
    # BUG FIX: the original `(self.req_ratio or self.consideration_rate) < 0`
    # only compared the first truthy operand, so a negative
    # consideration_rate slipped through.  Check each value explicitly.
    if self.req_ratio < 0 or self.consideration_rate < 0:
        raise ValueError("The required_ration and consideration_rate should be positive value")
    # Acceptable ratio window, clamped so the lower bound is never
    # negative when the tolerance exceeds the target ratio.
    self.min_ratio = self.req_ratio - self.consideration_rate
    self.max_ratio = self.req_ratio + self.consideration_rate
    if self.min_ratio < 0:
        self.min_ratio = 0
    self.downloader = Downloader(project="news",
                                 progress_bar=False,
                                 dest=self.dest)
    self.resizer = Resizer(req_ratio=self.req_ratio,
                           min_ratio=self.min_ratio,
                           max_ratio=self.max_ratio)
    self.aws = AWS(aws_access_key_id=aws_access_key_id,
                   aws_secret_access_key=aws_secret_access_key)
def __init__(self):
    u"""Build the full CloudFormation template for the test deployment.

    First provisions supporting resources outside the template (a
    keypair, a uniquely-named deployment S3 bucket, a server
    certificate) if they do not already exist, then assembles a
    troposphere Template: input parameters, a VPC with subnet and
    internet routing, network ACLs, a security group and a single
    bootstrapped EC2 instance.
    """
    self.aws = AWS()
    self.ami = AMI()
    self.ref_stack_id = Ref('AWS::StackId')
    self.ami_id = self.ami.minimal_linux_ami()

    # NOTE: Troposphere doesn't have a template feature to make KeyPairs
    # So handle this ad-hoc for now.
    self.keypair_name = 'test-deploy-keypair'
    if self.keypair_doesnt_exist():
        self.create_keypair(self.keypair_name)

    # Deployment bucket gets a random 12-hex-char suffix to avoid
    # global S3 name collisions.
    self.deployment_bucket_prefix = 'test-deploy-bucket-'
    self.deployment_bucket_name = '{}{}'.format(
        self.deployment_bucket_prefix,
        uuid.uuid4().hex[:12].lower())
    self.deployment_bucket_location = None
    if self.deploy_bucket_doesnt_exist():
        self.deployment_bucket_location = self.create_deploy_bucket(
            self.deployment_bucket_name)
    else:
        self.deployment_bucket_location = self.get_bucket_url(
            self.deployment_bucket_name)

    self.server_certificate_name = 'test-deploy-certificate'
    self.server_certificate_arn = None
    if self.server_certificate_doesnt_exist():
        self.server_certificate_arn = self.upload_server_certificate()
    # NOTE(review): when the certificate already exists the ARN stays
    # None (and so does the template parameter default) -- confirm.

    # ---- Template and its input parameters ----
    self.template = Template()
    self.template.add_version('2010-09-09')
    self.template.add_description(
        'AWS Cloudformation Template for autoscaled, load balance controlled EC2 service'
    )
    self.template.add_parameter(
        Parameter('KeyName',
                  Description='Name of an existing EC2 KeyPair',
                  Default=self.keypair_name,
                  Type='String'))
    self.template.add_parameter(
        Parameter('AmiId',
                  Description='Lastest Minimal Linux AMI',
                  Default=self.ami_id,
                  Type='String'))
    self.template.add_parameter(
        Parameter('DeployBucketName',
                  Description='Name of the deployment_bucket',
                  Default=self.deployment_bucket_name,
                  Type='String'))
    self.template.add_parameter(
        Parameter('DeployBucketLocation',
                  Description='Location of the deployment_bucket',
                  Default=self.deployment_bucket_location,
                  Type='String'))
    self.template.add_parameter(
        Parameter('ServerCertificateArn',
                  Description='Certificate ARN for the Load Balancer',
                  Default=self.server_certificate_arn,
                  Type='String'))
    self.sshlocation = self.template.add_parameter(
        Parameter(
            'SSHLocation',
            Description=
            'The IP address range that can be used to SSH to the EC2 instances',
            Type='String',
            MinLength='9',
            MaxLength='18',
            Default='0.0.0.0/0',
            AllowedPattern=
            r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
            ConstraintDescription=(
                "must be a valid IP CIDR range of the form x.x.x.x/x.")))

    # ---- Networking: VPC, subnet, internet gateway, routing ----
    self.vpc = self.template.add_resource(
        VPC('TestDeployVpc',
            CidrBlock='10.0.0.0/16',
            Tags=Tags(Application=self.ref_stack_id)))
    self.subnet = self.template.add_resource(
        Subnet('TestDeploySubnet',
               VpcId=Ref(self.vpc),
               CidrBlock='10.0.0.0/24',
               Tags=Tags(Application=self.ref_stack_id)))
    self.gateway = self.template.add_resource(
        InternetGateway('TestDeployGateway',
                        Tags=Tags(Application=self.ref_stack_id)))
    self.gatewayattach = self.template.add_resource(
        VPCGatewayAttachment('AttachGateway',
                             VpcId=Ref(self.vpc),
                             InternetGatewayId=Ref(self.gateway)))
    self.route_table = self.template.add_resource(
        RouteTable('RouteTable',
                   VpcId=Ref(self.vpc),
                   Tags=Tags(Application=self.ref_stack_id)))
    # Default route sends all traffic through the internet gateway.
    self.route = self.template.add_resource(
        Route('Route',
              DependsOn='AttachGateway',
              GatewayId=Ref('TestDeployGateway'),
              DestinationCidrBlock='0.0.0.0/0',
              RouteTableId=Ref(self.route_table)))
    self.subnet_route_association = self.template.add_resource(
        SubnetRouteTableAssociation(
            'SubnetRouteTableAssociation',
            SubnetId=Ref(self.subnet),
            RouteTableId=Ref(self.route_table),
            DependsOn=['TestDeploySubnet', 'RouteTable']))

    # ---- Network ACL: HTTP/HTTP-alt/HTTPS/SSH inbound, HTTP(S)
    # outbound, plus ephemeral response ports in both directions ----
    self.network_acl = self.template.add_resource(
        NetworkAcl('NetworkAcl',
                   VpcId=Ref(self.vpc),
                   Tags=Tags(Application=self.ref_stack_id)))
    self.inbound_private_http = self.template.add_resource(
        NetworkAclEntry('InboundHTTP',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='100',
                        Protocol='6',
                        PortRange=PortRange(To='80', From='80'),
                        Egress='false',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.inbound_private_http_alt = self.template.add_resource(
        NetworkAclEntry('InboundHTTPAlt',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='101',
                        Protocol='6',
                        PortRange=PortRange(To='8000', From='8000'),
                        Egress='false',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.inbound_private_https = self.template.add_resource(
        NetworkAclEntry('InboundHTTPS',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='102',
                        Protocol='6',
                        PortRange=PortRange(To='443', From='443'),
                        Egress='false',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.inbound_ssh = self.template.add_resource(
        NetworkAclEntry('InboundSSH',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='103',
                        Protocol='6',
                        PortRange=PortRange(To='22', From='22'),
                        Egress='false',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.inbound_response = self.template.add_resource(
        NetworkAclEntry('InboundResponsePorts',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='104',
                        Protocol='6',
                        PortRange=PortRange(To='65535', From='1024'),
                        Egress='false',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.outbound_http = self.template.add_resource(
        NetworkAclEntry('OutboundHTTP',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='100',
                        Protocol='6',
                        PortRange=PortRange(To='80', From='80'),
                        Egress='true',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.outbound_https = self.template.add_resource(
        NetworkAclEntry('OutboundHTTPS',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='101',
                        Protocol='6',
                        PortRange=PortRange(To='443', From='443'),
                        Egress='true',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.outbound_response = self.template.add_resource(
        NetworkAclEntry('OutboundResponsePorts',
                        NetworkAclId=Ref(self.network_acl),
                        RuleNumber='102',
                        Protocol='6',
                        PortRange=PortRange(To='65535', From='1024'),
                        Egress='true',
                        RuleAction='allow',
                        CidrBlock='0.0.0.0/0'))
    self.subnet_network_association = self.template.add_resource(
        SubnetNetworkAclAssociation(
            'SubnetNetworkACLAssociation',
            SubnetId=Ref(self.subnet),
            NetworkAclId=Ref(self.network_acl),
            DependsOn=['TestDeploySubnet', 'NetworkAcl']))

    # ---- Security group: SSH + ephemeral ports in, all TCP out ----
    self.instance_security_group = self.template.add_resource(
        SecurityGroup('InstanceSecurityGroup',
                      GroupDescription='Open all ports',
                      SecurityGroupIngress=[
                          SecurityGroupRule(IpProtocol='tcp',
                                            FromPort='22',
                                            ToPort='22',
                                            CidrIp='0.0.0.0/0'),
                          SecurityGroupRule(IpProtocol='tcp',
                                            FromPort='1024',
                                            ToPort='65535',
                                            CidrIp='0.0.0.0/0')
                      ],
                      SecurityGroupEgress=[
                          SecurityGroupRule(IpProtocol='tcp',
                                            FromPort='1',
                                            ToPort='65535',
                                            CidrIp='0.0.0.0/0')
                      ],
                      VpcId=Ref(self.vpc)))

    # ---- EC2 instance, bootstrapped via cfn-init/cfn-signal; the
    # user-data script ends by serving SimpleHTTPServer on port 8000 ----
    self.instance = self.template.add_resource(
        Instance(
            'TestDeployInstance',
            ImageId=Ref('AmiId'),
            InstanceType='t2.micro',
            KeyName=Ref('KeyName'),
            NetworkInterfaces=[
                NetworkInterfaceProperty(
                    GroupSet=[Ref('InstanceSecurityGroup')],
                    AssociatePublicIpAddress='true',
                    DeviceIndex='0',
                    DeleteOnTermination='true',
                    SubnetId=Ref('TestDeploySubnet'))
            ],
            UserData=Base64(
                Join('', [
                    "#!/bin/bash\n",
                    "apt-get update\n",
                    "apt-get -y install python python-pip python-setuptools\n",
                    "mkdir aws-cfn-bootstrap-latest\n",
                    "curl https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz | tar xz -C aws-cfn-bootstrap-latest --strip-components 1\n",
                    "easy_install aws-cfn-bootstrap-latest\n",
                    "/usr/local/bin/cfn-init --stack ",
                    {
                        "Ref": "AWS::StackName"
                    },
                    " --resource TestDeployInstance",
                    " --region ",
                    {
                        "Ref": "AWS::Region"
                    },
                    "\n",
                    "/usr/local/bin/cfn-signal --exit-code $? '",
                    {
                        "Ref": "WaitHandle"
                    },
                    # NOTE(review): implicit concatenation joins the next
                    # two literals into one list element ("'\n\n") --
                    # confirm the missing comma is intentional.
                    "'\n"
                    "\n",
                    "python -m SimpleHTTPServer 8000 2>&1 >/dev/null &\n",
                ])),
            DependsOn=['InstanceSecurityGroup', 'TestDeploySubnet'],
            Tags=Tags(Application=self.ref_stack_id)))
class WebHookHandler(tornado.web.RequestHandler):
    """Facebook Messenger webhook handling face recognition of images.

    Receives Messenger events, runs attached images through an AWS
    face search, and replies with cards for the best matches; postback
    buttons feed like/unlike feedback back into the face index.
    (Python 2 code: print statements, urllib2, xrange.)
    """

    # Class-level setup runs once at import time: load the Messenger
    # page tokens from a local properties file.
    p = Properties()
    with open("token.properties", "rb") as f:
        p.load(f, "utf-8")
    verify_token = p["verify_token"]
    page_access_token = p["page_access_token"]
    # Facebook Graph Send API endpoint used by all send* helpers.
    api_url = 'https://graph.facebook.com/v2.9/me/messages'
    api_headers = {'content-type': 'application/json'}
    # Received images are archived here in per-day subdirectories.
    images_root = "/var/www/like-av.xyz/images/"
    aws = AWS()
    dao = DAO()

    def get(self):
        # Webhook verification handshake: echo the challenge when the
        # verify token matches.
        if self.get_argument("hub.verify_token", "") == self.verify_token:
            self.write(self.get_argument("hub.challenge", ""))
        else:
            self.write('Error, wrong validation token')

    def post(self):
        """Handle incoming messaging events (text, image, postback)."""
        print "receive!"
        data = json.loads(self.request.body)
        print data
        messaging_events = data["entry"][0]["messaging"]
        text = ""
        for event in messaging_events:
            sender = event["sender"]["id"]
            # Plain text message: reply asking for a picture.
            if ("message" in event and "text" in event["message"]):
                text = event["message"]["text"]
                self.sendTextMessage(sender, "給我正妹圖片")
            # Image attachment: run a face search against the AWS index.
            if ("message" in event and "attachments" in event["message"]):
                attachments = event["message"]["attachments"]
                print attachments
                if attachments[0]["type"] == "image":
                    img_url = attachments[0]["payload"]["url"]
                    print img_url
                    self.sendTypingMessage(sender, "typing_on")
                    img_bytes = urllib2.urlopen(img_url).read()
                    result = self.aws.search_faces(img_bytes)
                    self.sendTypingMessage(sender, "typing_off")
                    if result is None:
                        # No face matched -- tell the user.
                        self.sendTextMessage(sender, "不是正妹所以找不到")
                    else:
                        # Derive the image file name from its CDN URL.
                        pattern = re.compile("https://(.*)/(.*)\?(.*)")
                        match = pattern.search(img_url)
                        img_name = match.group(2)
                        print img_name
                        today = str(datetime.date.today())
                        self.saveImage(today, img_name, img_bytes)
                        # Reply with up to the two best matches.
                        face_count = len(result)
                        for i in xrange(2):
                            face = result[i] if face_count > i else None
                            if face is not None:
                                actress = self.dao.find_one_actress_by_id(
                                    face.get("id"))
                                if bool(actress):
                                    self.sendImageMessage(
                                        sender, face, today + img_name,
                                        actress)
            # Postback: user feedback on a previous match ("O" = like).
            if ("postback" in event and "payload" in event["postback"]):
                payload = event["postback"]["payload"]
                feedback = payload.split(",")
                if feedback[0] == "O":
                    ox = "like"
                    # Confirmed match: add this image to the face index.
                    file = self.images_root + feedback[2]
                    with open(file, "rb") as img_file:
                        self.aws.insert_index_face(feedback[1],
                                                   img_file.read())
                else:
                    ox = "unlike"
                self.dao.update_one_feedback_by_id(feedback[1], ox,
                                                   feedback[2])
                self.sendTextMessage(sender, "感謝回饋")

    def saveImage(self, today, img_name, img_bytes):
        """Archive the raw image bytes under images_root/<today>/."""
        directory = self.images_root + today + "/"
        if not os.path.exists(directory):
            os.makedirs(directory)
        f = open(directory + img_name, 'wb')
        f.write(img_bytes)
        f.close()

    def sendTextMessage(self, sender, text):
        """Send a plain text message via the Graph Send API."""
        if len(text) <= 0:
            return
        data = {"recipient": {"id": sender}, "message": {"text": text}}
        params = {"access_token": self.page_access_token}
        r = requests.post(self.api_url,
                          params=params,
                          data=json.dumps(data),
                          headers=self.api_headers)

    def sendImageMessage(self, sender, face, img_name, actress):
        """Send a generic-template card for a matched actress.

        The card shows the actress photo, the similarity score, a link
        to her filmography, and O/X postback buttons whose payload
        encodes (O|X, face id, image name) for the feedback handler.
        """
        attachment = {
            "type": "template",
            "payload": {
                "template_type": "generic",
                "elements": [{
                    "title": actress.get("name"),
                    "image_url": actress.get("img"),
                    "subtitle":
                    "相似度: " + str(round(face.get("similarity"), 2)) + "%",
                    "default_action": {
                        "type": "web_url",
                        # "url": "http://www.dmm.co.jp/mono/dvd/-/list/=/article=actress/id=" + face.get("id") + "/sort=date/",
                        "url":
                        "http://sp.dmm.co.jp/mono/list/index/shop/dvd/article/actress/id/"
                        + face.get("id") + "/sort/date",
                        # "url": "http://www.r18.com/videos/vod/movies/list/id=" + face.get("id") + "/sort=new/type=actress/",
                        "webview_height_ratio": "compact"
                    },
                    "buttons": [
                        # {
                        #     "type": "web_url",
                        #     "url": "http://sukebei.nyaa.se/?page=search&term=" + actress.get("name"),
                        #     "title": "去找片"
                        # },
                        {
                            "type": "postback",
                            "title": "O 覺得像",
                            "payload": "O," + face.get("id") + "," + img_name
                        },
                        {
                            "type": "postback",
                            "title": "X 差很多",
                            "payload": "X," + face.get("id") + "," + img_name
                        }
                    ]
                }]
            }
        }
        data = {
            "recipient": {
                "id": sender
            },
            "message": {
                "attachment": attachment
            }
        }
        params = {"access_token": self.page_access_token}
        r = requests.post(self.api_url,
                          params=params,
                          data=json.dumps(data),
                          headers=self.api_headers)

    def sendTypingMessage(self, sender, action):
        """Toggle the typing indicator ('typing_on' / 'typing_off')."""
        data = {"recipient": {"id": sender}, "sender_action": action}
        params = {"access_token": self.page_access_token}
        r = requests.post(self.api_url,
                          params=params,
                          data=json.dumps(data),
                          headers=self.api_headers)
def valor_create(self):
    """Create a single valor in this deployment's subnet and security group."""
    aws = AWS()
    subnet_id = aws.get_subnet_id()
    sec_group_id = aws.get_sec_group().id
    return self.valor_manager.create_valor(subnet_id, sec_group_id)
def main(): """ Main data execution. """ # Variable to set exit code error_count = 0 # Gather Region data and connect regions = boto.ec2.regions() for region in regions: if region.name in BAD_REGIONS: continue conn = AWS(region.name) connect = conn.connect_to_ec2() # Create table for bad data table = PrettyTable( ['Instance ID', 'Server Name', 'Environment', 'Product']) table.hrules = True table.format = True # Retrieve instance data reservations = connect.get_all_instances() if reservations: LOG.info("Instances found in region {}!".format(region.name)) with open(HTML_FILE, 'a') as html: # Iterate through AWS instances, tag data for res in reservations: for inst in res.instances: for tag in inst.tags: # Checks if Tag key exists if "Name" in inst.tags and not None: amazon_name = inst.tags['Name'] else: amazon_name = "missing name" if "Product" in inst.tags and not None: amazon_product = inst.tags['Product'] else: amazon_product = "missing product" if "Environment" in inst.tags and not None: amazon_environment = inst.tags['Environment'] else: amazon_environment = "missing environment" continue # Check for bad tag values, and print those in table bad_environment = check_data(amazon_environment, CANONICAL_ENVIRONMENTS) bad_product = check_data(amazon_product, CANONICAL_PRODUCTS) if bad_product is not None: table.add_row( [inst.id, amazon_name, "+", amazon_product]) error_count += 1 elif bad_environment is not None: table.add_row([ inst.id, amazon_name, amazon_environment, "+" ]) error_count += 1 else: break # Print region name and coresponding data html.write("<h2> Region {}</h2>".format(region.name)) html.write(table.get_html_string()) else: LOG.info("There are no instances in region {}".format(region.name)) # Prints closing html tags with open(HTML_FILE, 'a') as html_close: html_close.write(''' </body> </html> ''') html_close.close() sys.exit(error_count)
def __init__(self):
    u"""AMI class constructor: create the AWS helper client."""
    self.aws = AWS()
def __init__(self):
    """Set up the AWS helper and the RethinkDB manager."""
    self.aws = AWS()
    self.rethinkdb_manager = RethinkDbManager()
def __init__(self, verbose=False):
    """LoRaRcvCont constructor.

    BUG FIX: the method was named ``__init`` (missing trailing
    underscores) so it never ran as the constructor, and it referenced
    an undefined ``verbose`` name.  Renamed to ``__init__`` with
    ``verbose`` taken as a defaulted parameter.

    :param verbose: passed through to the base class constructor.
    """
    super(LoRaRcvCont, self).__init__(verbose)
    # Park the radio in standby until start() switches to RXCONT.
    self.set_mode(MODE.STDBY)
    #self.set_dio_mapping([0] * 6)
    self.db = AWS()
import logging  # BUG FIX: `logging` was used below but never imported
import os
import shutil
import sys
import time
import unittest

# local modules
from aws import AWS

# Derive a logger named after this script file.
self = os.path.basename(sys.argv[0])
myName = os.path.splitext(self)[0]
log = logging.getLogger(myName)
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
log.setLevel(logging.DEBUG)

# Shared fixtures for the test cases below.
aws = AWS(logger=log)
test_bucket = 'af-example1'
test_instance_id = 'i-00d4bb1c2ee88275c'


@unittest.skip("Skipping Test01")
class Test01(unittest.TestCase):
    def test_get_buckets(self):
        buckets = aws.get_buckets()
        log.debug(buckets)
        self.assertGreater(len(buckets), 0)

    def test_get_instances(self):
        # NOTE(review): logs the response but asserts nothing -- confirm.
        response = aws.get_instances()
        log.debug(response)
def __init__(self):
    """Initialise the data-access object and the AWS helper."""
    self.dao = DAO()
    self.aws = AWS()
# Master-module simulator: decode a captured sensor payload and publish
# the result to AWS.
from weather_master import PackageReader
from aws import AWS

print("Start Master Module Simulator")
reader = PackageReader()
print("\nRxDone")
# Example raw payloads captured from the weather station.
#payload = [0x0A, 0x05, 0x2E, 0x95, 0xF3, 0x71, 0x84, 0x00, 0x62, 0x11]
payload = [
    0x10, 0x05, 0x2E, 0x8F, 0xED, 0x8C, 0x8E, 0x00, 0x64, 0x17, 0x3F, 0x2C,
    0x23, 0x01, 0x89, 0x44
]
#10052E8FED8C8E0064173F2C23018944
print(bytes(payload).hex())
# Decode the payload (second argument presumably enables verbose
# output -- confirm against PackageReader).
reader.read_package(payload, True)
# Publish the decoded data pack and close the connection.
db = AWS()
db.connect()
db.publish_sensor_data(reader.data_pack, True)
db.disconnect()
# NOTE(review): fragment -- these first statements continue a function
# whose definition lies outside this chunk; indentation reconstructed.
print("Unlocking Door")
lock.unlock_door()
print("Door Locked")
# if audio is set to new in Firebase, download the latest audio file and play it via the speaker
if audio == "new":
    fb.get_storage()
    os.system("omxplayer audioVisitor.mp3")
    fb.update_data({'doorbell/audio/state': 'waiting'})


if __name__ == '__main__':
    # Firebase class instance
    fb = Firebase()
    # AWS class instance
    aws = AWS(fb)
    # NewFace class instance
    nf = NewFace(fb)
    # Solenoid Lock class instance
    lock = Solenoid()
    # Face Check class instance
    fc = FaceCheck()
    # Doorbell button class instance
    bb = BellButton(aws, nf, switch_face_check_on, switch_face_check_off)
    # Start the BellButton Thread class
    bb.start()
    # Start the Face Check Thread class
    fc.start()
    # Turn on face check
    switch_face_check_on()
    # Run the listen function
    # NOTE(review): the chunk is truncated here -- the listen call
    # itself is outside this view.
# NOTE(review): fragment -- argv[0..1] handling, `image_dir` and `log`
# are defined before this chunk.
instance_id = sys.argv[2]
bucket_in = sys.argv[3]
bucket_out = sys.argv[4]
command = './run.sh'

# get the date from one of the image files
name = ''
for f in os.listdir(image_dir):
    if not f.endswith('.jpg'):
        continue
    name = f[:10]
    break

# pack all images into one archive, so we have only one transfer
os.system('tar -C {} -c -f images.tar .'.format(image_dir))

aws = AWS(region='eu-central-1', logger=log)
log.info('uploading image files from {} to AWS S3 bucket {}'.format(
    image_dir, bucket_in))
aws.upload(bucket_in, ['images.tar'])
# Local archive is no longer needed once uploaded.
os.remove('images.tar')

log.info('starting EC2 instance {}'.format(instance_id))
aws.start_instance(instance_id, wait=True)

# Run the processing command inside the instance via SSM, as ec2-user.
log.info('starting command {} in instance'.format(command))
response = aws.send_commands(instance_id, [
    '#!/bin/bash', 'cd /home/ec2-user', 'su -c "{}" ec2-user'.format(command)
])
# Poll every 60 s, for up to 20 minutes, until the command finishes.
response = aws.wait_for_ssm_command(response, timeout=1200, interval=60)

log.info('stopping EC2 instance {}'.format(instance_id))