Exemplo n.º 1
0
 def __init__(self, provider="other"):
     """Remember the provider name and build the matching cloud client.

     :param provider: one of "gce", "aws" or "other" (default).
     """
     self.logger = logging.getLogger("DNSWatch.InstanceInfo")
     self.provider = provider
     # Default to None so self.cloud always exists; the original left the
     # attribute unset for unknown providers, which raised AttributeError
     # on any later access.
     self.cloud = None
     if provider == "gce":
         self.cloud = GCE()
     elif provider == "aws":
         self.cloud = AWS()
Exemplo n.º 2
0
    def create_standby_valors(self):
        """Ensure roughly NUM_STANDBY_VALORS available valors exist.

        Counts the currently available valors and, when at or below the
        standby threshold, launches enough new ones (on background
        threads) to make up the difference.
        """

        empty_valors = self.get_available_valors()

        # Check if the number of empty valors is less than NUM_STANDBY_VALORS
        # If so then create additional valors
        if len(empty_valors) <= NUM_STANDBY_VALORS:

            NUM_VALORS_TO_CREATE = NUM_STANDBY_VALORS - len(empty_valors)

            aws = AWS()

            # Each new valor is created and launched on its own thread so
            # this call returns without waiting for the instances to boot.
            for i in range(NUM_VALORS_TO_CREATE):
                create_standby_valors_thread = threading.Thread(
                    target=self.create_and_launch_valor,
                    args=(
                        aws.get_subnet_id(),
                        aws.get_sec_group().id,
                    ))
                create_standby_valors_thread.start()

        elif len(empty_valors) > NUM_STANDBY_VALORS:
            # Number of empty valors is more than the required number of
            # standby valors so do nothing
            pass
Exemplo n.º 3
0
def main():
    """Run webcam face tracking with local or AWS-based recognition."""
    choice = int(input("Would you like local (1) or AWS (2) recognition?: "))
    # create a tracker that uses the classifier
    tracker = Tracker()
    recognizer = Recognizer("./knownfaces/")
    aws = AWS()

    # get a threaded video stream and start it
    # wake up time for standard webcam is >= 8 seconds
    video = WebCamStream()
    video.start()

    frame_count = 0  # count frames
    detected_count = 0  # count detected faces
    # Fix: initialize before the loop. The original referenced new_count
    # inside the loop before the first detection pass ever assigned it,
    # which raised NameError if a face was tracked in the first 15 frames.
    new_count = 0

    # while the 'q' key has not been pressed
    while cv2.waitKey(1) != ord('q'):

        frame = video.read()  # get current frame
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to gray
        faces = tracker.get_coordinates()  # coordinates of detected faces

        for (x, y, w, h) in faces:
            draw_rectangle(frame, x, y, w, h)  # highlight with rectangle
            # if more faces were detected since last time
            if new_count > detected_count:
                # update face count and attempt to recognize all faces
                # NOTE: could have it predict on only the latest detected faces
                detected_count = new_count

                if choice == 1:  # local recognition
                    # Fix: NumPy images index as [row, col] == [y, x], so
                    # rows span the height and columns the width. The
                    # original sliced [y:y + w, x:x + h], swapping them.
                    success, prediction = recognizer.predict(
                        gray_frame[y:y + h, x:x + w])
                else:  # aws recognition
                    convert = bytes(cv2.imencode('.jpg', gray_frame)[1])
                    success, prediction = aws.compare(convert)

                if success:
                    # send the predicted name to the server
                    send_name(prediction)

        # detect new faces every 15 frames
        if frame_count == 15:
            frame_count = 0
            new_count = tracker.detect_and_track(gray_frame)
            # found less faces, adjust detected
            if new_count < detected_count:
                detected_count = new_count
        else:
            # update object trackers
            tracker.update(gray_frame)

        # display frame
        # this is only for testing and verification
        cv2.imshow('Image', frame)
        frame_count += 1

    # Clean up
    video.stop()
    cv2.destroyAllWindows()
Exemplo n.º 4
0
    def get_empty_valor(self, migration_source_valor_id=None):
        """ Get and return an available valor node.
        If less than the standby number of valors exist then
        create more standby valors

        :param migration_source_valor_id: optional valor id to exclude
            (the valor a virtue is migrating away from).
        :return: a valor record dict as stored in RethinkDB.
        """

        empty_valors = self.get_available_valors(migration_source_valor_id)

        # Check if there are no empty valors
        # If there are none then create a valor node
        if not empty_valors:
            aws = AWS()
            valor_id = self.create_and_launch_valor(aws.get_subnet_id(),
                                                    aws.get_sec_group().id)

            # Check the valor state and verify that it is 'RUNNING'
            self.verify_valor_running(valor_id)

            empty_valor = self.rethinkdb_manager.get_valor(valor_id)
        else:
            # Select a random index for the array of empty_valors
            # This essentially selects a random valor from the list of valors
            random_index = random.randint(0, len(empty_valors) - 1)

            # Check the valor state and verify that it is 'RUNNING'
            self.verify_valor_running(empty_valors[random_index]['valor_id'])

            empty_valor = empty_valors[random_index]

        return empty_valor
Exemplo n.º 5
0
    def valor_create_pool(self, number_of_valors):
        """Create a pool of valors in the current AWS subnet/security group."""
        aws_info = AWS()
        subnet_id = aws_info.get_subnet_id()
        security_group_id = aws_info.get_sec_group().id
        return self.valor_manager.create_valor_pool(
            number_of_valors, subnet_id, security_group_id)
Exemplo n.º 6
0
    def _detect_provider(self):
        """Return "gce", "aws" or "other" depending on where this host runs."""
        self.logger.info("Detecting cloud provider.")

        # Probe both clouds; GCE takes precedence when both claim us.
        gce_probe = GCE()
        aws_probe = AWS()

        if gce_probe.is_inside():
            detected = "gce"
        elif aws_probe.is_inside():
            detected = "aws"
        else:
            detected = "other"

        self.logger.info("My cloud provider is: {}.".format(detected))
        return detected
Exemplo n.º 7
0
    def start(self):
        """Enter continuous-receive mode and poll radio status forever.

        Configures the modem for RX and then loops, rewriting one stdout
        line twice a second with RSSI, SNR and modem status. Never returns.
        """
        # NOTE(review): assigning an AWS client to self.db looks unrelated
        # to the radio loop below — confirm it is actually needed here.
        self.db = AWS()
        self.reset_ptr_rx()
        self.set_mode(MODE.RXCONT)

        while True:
            time.sleep(.5)
            rssi_value = self.get_rssi_value()
            snr_value = self.get_pkt_snr_value()
            status = self.get_modem_status()
            sys.stdout.flush()
            # \r rewrites the same console line on every poll.
            sys.stdout.write("\r%d %d %d %d" %
                             (rssi_value, snr_value, status['rx_ongoing'],
                              status['modem_clear']))
Exemplo n.º 8
0
    def __init__(self):
        """Initialize empty resource caches and the AWS helper, then run setup()."""
        # Caches of AWS resources, keyed by id (populated later).
        self.instances = {}
        self.volumes = {}
        self.snapshots = {}
        # Tag filtering / enforcement configuration.
        self.filtered_tag_keys = []
        self.require_tags_instance = []
        # Prefix applied to every emitted metric name.
        self.metric_prefix = 'resource_tagger_'
        self.metrics = {}
        self.aws = AWS()

        # Default tag-copy behaviour: which key to copy and how to split it.
        self.tag_default_copy_key = ''
        self.tag_default_copy_split = ['-', 2]

        # setup() runs last so it can see all of the defaults above.
        self.setup()
Exemplo n.º 9
0
def main2():
    """Fetch and pretty-print a credentials INI file stored in S3."""
    aws = AWS('techops-newdev')
    s3_client = aws.session.client('s3')
    response = s3_client.get_object(
        Bucket='cb-techops-nonprod-secure-store',
        Key='dev/apps/cb-techops-nonprod-ert/1.0/api_credentials.ini')
    body_text = response['Body'].read().decode('UTF-8')
    pprint(body_text)
Exemplo n.º 10
0
def _get_all_instances():
    """Return 'ubuntu@<dns>' ssh targets for every running tagged instance."""
    try:
        connection = AWS(access_key=env.aws_access_key,
                         secret_key=env.aws_secret_key,
                         region=env.ec2_region).connect()
    except Exception:
        Notification("Could not connect to AWS").error()
        abort("Exited!")

    # Only running instances launched with our key pair.
    ec2_filters = {'key-name': env.aws_key_name,
                   'instance-state-name': 'running'}

    # Build the user@hostname strings for ssh to use later.
    instances = [
        'ubuntu@' + str(host.public_dns_name)
        for reservation in connection.get_all_instances(filters=ec2_filters)
        for host in reservation.instances
    ]

    Notification('Got {} instances:\n{}'.format(
        len(instances), "\n".join(instances))).info()

    return instances
def lambda_handler(event, context):
    """Collect last month's billing data from every enabled cloud.

    Providers are toggled through USE_<NAME> environment variables; a
    Slack report is posted when SLACK_REPORT is set. Returns 200.
    """
    secrets = Secrets()

    # Determine which providers are enabled via the environment.
    which_clouds = [name for name in ('aws', 'heroku', 'do', 'cloudflare')
                    if os.environ.get('USE_{}'.format(name.upper()), None)]

    # Billing period: the previous calendar month.
    last_day_of_month = date.today().replace(day=1) - timedelta(days=1)
    end = last_day_of_month
    start = last_day_of_month.replace(day=1)

    results = []
    for cloud in which_clouds:
        res = {}
        if cloud == 'aws':
            # AWS
            res = AWS().get(start, end)

        if cloud == 'heroku':
            res = Heroku(secrets.get('heroku')).get(start, end)

        if cloud == 'do':
            res = DO(secrets.get('do')).get(start, end)

        if cloud == 'cloudflare':
            res = Cloudflare(secrets.get('cloudflare')).get(start, end)

        results.append(res)

    if os.environ.get('SLACK_REPORT', False):
        from report import REPORT
        r = REPORT(results, secrets.get('slack'), start, end, True)
        r.slack()
    return 200
Exemplo n.º 12
0
def _create_instance(num_instances=1):
    """
    Creates new EC2 instance(s) using boto and returns the public DNS
    name of the last instance launched.

    :param num_instances: how many instances to spin up (must be >= 1).
    :raises ValueError: if num_instances is less than 1. The original
        code fell through to the final return with `instance` unbound
        in that case and crashed with NameError.
    """
    num_instances = int(num_instances)
    if num_instances < 1:
        raise ValueError("num_instances must be >= 1")

    try:
        connection = AWS(access_key=env.aws_access_key, secret_key=env.aws_secret_key, region=env.ec2_region).connect()
    except Exception:
        Notification("Could not connect to AWS").error()
        abort("Exited!")

    # Interactively collect launch parameters, defaulting from env.
    aws_ami = prompt("Hit enter to use the default Ubuntu AMI or enter one:", default=env.ec2_ami)
    aws_security_groups = prompt("Enter the security group (must already exist)?", default=env.ec2_security_group)
    aws_instance_type = prompt("What instance type do you want to create? ", default=env.ec2_instance)
    aws_instance_key_name = prompt("Enter your key pair name (don't include .pem extension)", default=env.aws_key_name)

    BUILD_SERVER = dict(image_id=aws_ami,
                        instance_type=aws_instance_type,
                        security_groups=[aws_security_groups],
                        key_name=aws_instance_key_name)

    Notification('Spinning up the instances...').info()

    # Create new instance(s) using boto and poll each until it is running.
    for _ in range(num_instances):
        reservation = connection.run_instances(**BUILD_SERVER)
        instance = reservation.instances[0]
        time.sleep(5)
        while instance.state != 'running':
            time.sleep(5)
            instance.update()
            Notification('-Instance {} is {}'.format(instance.id, instance.state)).info()

    # A new instance take a little while to allow connections so sleep for x seconds.
    Notification('Sleeping for {} seconds before attempting to connect...'.format(TEMP_SLEEP)).info()
    time.sleep(TEMP_SLEEP)

    return instance.public_dns_name
Exemplo n.º 13
0
    def __init__(self, aws_access_key_id, aws_secret_access_key,
                 req_ratio=None, req_width=None, req_height=None,
                 consideration_rate=None, dest=None, ):
        """
        Performer constructor method.
        :args:
            :required_ratio: The `desired` ratio of the output image
            :consideration_rate: The approximation value.
            :max_ratio: The maximum ratio to consider.
            :min_ratio: The minimum ration to consider.
        :raises ValueError: if req_ratio or consideration_rate is negative.
        """
        if not req_ratio:
            req_ratio = 1.9
        if not req_width:
            req_width = 1330
        if not req_height:
            req_height = 700
        if not consideration_rate:
            consideration_rate = 0.9
        self.req_ratio = req_ratio
        self.consideration_rate = consideration_rate
        # Keep the requested dimensions; the original computed their
        # defaults above and then silently discarded the values.
        self.req_width = req_width
        self.req_height = req_height
        self.dest = dest
        # Bug fix: the original wrote
        #     if (self.req_ratio or self.consideration_rate) < 0:
        # which compares only req_ratio (a truthy float) against 0 and
        # never validates consideration_rate.
        if self.req_ratio < 0 or self.consideration_rate < 0:
            raise ValueError("The required_ration and consideration_rate should be positive value")

        self.min_ratio = self.req_ratio - self.consideration_rate
        self.max_ratio = self.req_ratio + self.consideration_rate
        # Clamp: a ratio below zero is meaningless for image dimensions.
        if self.min_ratio < 0:
            self.min_ratio = 0

        self.downloader = Downloader(project="news", progress_bar=False,
                                     dest=self.dest)
        self.resizer = Resizer(req_ratio=self.req_ratio, min_ratio=self.min_ratio,
                               max_ratio=self.max_ratio)
        self.aws = AWS(aws_access_key_id=aws_access_key_id,
                       aws_secret_access_key=aws_secret_access_key)
Exemplo n.º 14
0
class AMI():
    u""" Manage AWS AMI Resources """

    def __init__(self):
        u""" AMI class constructor: create the shared AWS helper. """
        self.aws = AWS()

    def minimal_linux_ami(self):
        u""" Get Minimal AWS Linux AMI ID

        Queries EC2 for HVM/EBS/x86_64 Ubuntu 16.04 images and returns
        the ImageId of the most recently created one (None if no image
        matched).
        """
        client = self.aws.get_client('ec2')
        try:
            res = client.describe_images(
                Owners=['self', '099720109477'],
                Filters=[{
                    'Name': 'virtualization-type',
                    'Values': ['hvm']
                }, {
                    'Name': 'root-device-type',
                    'Values': ['ebs']
                }, {
                    'Name': 'architecture',
                    'Values': ['x86_64']
                }, {
                    'Name':
                    'description',
                    'Values':
                    ['Canonical, Ubuntu, 16.04 LTS, amd64 xenial image*']
                }])
        except ClientError as ex:
            print(ex)
            sys.exit()

        # Track the newest creation timestamp seen so far and keep the
        # matching image id.
        newest_time = None
        ami_id = None
        for image in res['Images']:
            created = parse(image['CreationDate'])
            if newest_time is None or created > newest_time:
                newest_time = created
                ami_id = image['ImageId']
        return ami_id
Exemplo n.º 15
0
def main():
    """Extend the media LVM volume when disk usage crosses the threshold."""
    options, args = parse_args()

    # Bail out early when usage is still under the configured ratio.
    if get_usage_ratio(options.mount_point) < options.ratio:
        logger.info("{0} usage ratio is less than {1}. Quitting".format(
            options.mount_point, options.ratio))
        return

    logger.info("Proceeding with extending media volume")

    aws = AWS()
    identity_doc = utils.get_instance_identity()['document']

    # Create a fresh EBS volume in this instance's availability zone ...
    volume_id = create_volume(aws.ec2, options.increment,
                              identity_doc['availabilityZone'])

    # ... attach it under the next free /dev/ partition name ...
    new_device_name = '/dev/' + increment_partition_name(
        get_sorted_partitions()[-1])
    aws.ec2.attach_volume(volume_id, identity_doc['instanceId'],
                          new_device_name)

    # ... and grow the logical volume onto the new device.
    extend_lvm(new_device_name, options.logical_group, options.logical_device)
Exemplo n.º 16
0
def get_client_materials_from_s3(s3_path,
                                 region='us-east-1',
                                 akid=None,
                                 skid=None):
    logger.debug("Getting client materials from S3")
    if s3_path.startswith('s3://'):
        s3_path = s3_path[5:]
    path_split = s3_path.split('/')
    bucket_name = path_split[0]

    aws = AWS(region, akid, skid)
    bucket = aws.s3.get_bucket(bucket_name)
    key = bucket.get_key('/'.join(path_split[1:]))

    with TempDir() as temp_dir:
        temp_file_name = path.join(temp_dir, 'client_materials.tgz')
        with open(temp_file_name, 'w+') as temp_file:
            key.get_contents_to_file(temp_file)
            #tf = tarfile.open(temp_file.name)
            #tf.extractall(temp_dir)
            ghetto_tar(temp_file_name)
            print os.listdir(temp_dir)
Exemplo n.º 17
0
 def __init__(self):
     u""" AMI class constructor.

     Creates the AWS helper used by the other methods of the class.
     """
     self.aws = AWS()
Exemplo n.º 18
0
    def valor_create(self):
        """Create a single valor in the current AWS subnet/security group."""
        aws_info = AWS()
        subnet_id = aws_info.get_subnet_id()
        security_group_id = aws_info.get_sec_group().id
        return self.valor_manager.create_valor(subnet_id, security_group_id)
Exemplo n.º 19
0
def main():
    ''' Gets arguments from the user, sign a SAML response and opens a session with the generated AccessKey.
        Mandatory parameters are: 
    '''

    # Parse Arguments
    args = get_args()

    # Check if the user provided file to load from
    if args.load_file:
        print "[+] Loading SAMLResponse from file..."
        saved_response = open(args.load_file, "r").read()
        arn, role_name = AWS.load(saved_response)
        aws_session_token = AWS.assume_role(
            AWS.TEMPLATES['role_arn'].format(arn=arn, role=role_name),
            AWS.TEMPLATES['principal_arn'].format(arn=arn), saved_response)

        # Open shell with the session
        AWS.apply_cli_session(aws_session_token["Credentials"], args.region)
        return

    # Set time parameters for assertion
    time = AWS.gen_timestamp(base_time=args.time)
    saml_expiration = AWS.gen_timestamp(base_time=args.time,
                                        minutes=int(args.saml_validity))
    session_expiration = AWS.gen_timestamp(base_time=args.time,
                                           minutes=int(args.session_validity))

    # Create the assertion
    print "[+] Creating the assertion"
    root = AWS.create_assertion(time, args.idp, args.user, saml_expiration,
                                args.sp, session_expiration, args.session_name,
                                args.roles, args.arn)

    # Sign the assertion
    print "[+] Signing the assertion with the private key provided"
    signed_root = AWS.sign_assertion(root, args.key, args.cert)

    # Insert signed assertion to saml response
    saml_response = AWS.TEMPLATES["response"].format(
        id=AWS.gen_id(),
        issue_instant=time,
        issuer=args.idp,
        assertion=etree.tostring(signed_root))

    # Encode the saml response with B64
    encoded_response = b64encode(saml_response)

    # Check if the user provided file to export to
    if args.out_file:
        print "[+] Writing the SAMLResponse to file: %s" % args.out_file
        with open(args.out_file, "w") as out_file:
            out_file.write(encoded_response)
        # Exit
        return

    # Assume role and get session token
    print "[+] Calling AssumeRoleWithSAML API"
    aws_session_token = AWS.assume_role(
        AWS.TEMPLATES['role_arn'].format(arn=args.arn, role=args.roles[0]),
        AWS.TEMPLATES['principal_arn'].format(arn=args.arn), encoded_response)

    # Open shell with the session
    print "[+] Opening a shell"
    AWS.apply_cli_session(aws_session_token["Credentials"], args.region)
Exemplo n.º 20
0
class WebHookHandler(tornado.web.RequestHandler):
    """Facebook Messenger webhook handler (Python 2).

    GET handles the webhook verification handshake; POST receives
    messaging events, runs face search on image attachments through the
    AWS helper, and replies via the Facebook Graph API.
    """

    # Tokens are loaded once, at class-definition time, from token.properties.
    p = Properties()
    with open("token.properties", "rb") as f:
        p.load(f, "utf-8")

    verify_token = p["verify_token"]
    page_access_token = p["page_access_token"]
    api_url = 'https://graph.facebook.com/v2.9/me/messages'
    api_headers = {'content-type': 'application/json'}
    images_root = "/var/www/like-av.xyz/images/"

    # Shared service helpers (one per process, shared by all requests).
    aws = AWS()
    dao = DAO()

    def get(self):
        # Webhook verification handshake: echo hub.challenge back only
        # when the verify token matches ours.
        if self.get_argument("hub.verify_token", "") == self.verify_token:
            self.write(self.get_argument("hub.challenge", ""))
        else:
            self.write('Error, wrong validation token')

    def post(self):
        # Delivery of one or more messaging events from Facebook.
        print "receive!"

        data = json.loads(self.request.body)
        print data

        messaging_events = data["entry"][0]["messaging"]
        text = ""
        for event in messaging_events:
            sender = event["sender"]["id"]
            # Plain text message: reply with a prompt to send a picture.
            if ("message" in event and "text" in event["message"]):
                text = event["message"]["text"]
                self.sendTextMessage(sender, "給我正妹圖片")

            # Image attachment: download it, run face search, reply.
            if ("message" in event and "attachments" in event["message"]):
                attachments = event["message"]["attachments"]
                print attachments

                if attachments[0]["type"] == "image":
                    img_url = attachments[0]["payload"]["url"]
                    print img_url

                    # Show a typing indicator while the search runs.
                    self.sendTypingMessage(sender, "typing_on")
                    img_bytes = urllib2.urlopen(img_url).read()
                    result = self.aws.search_faces(img_bytes)
                    self.sendTypingMessage(sender, "typing_off")
                    if result is None:
                        self.sendTextMessage(sender, "不是正妹所以找不到")
                    else:
                        # Pull the bare file name out of the CDN URL
                        # (second path component, before the query string).
                        pattern = re.compile("https://(.*)/(.*)\?(.*)")
                        match = pattern.search(img_url)
                        img_name = match.group(2)
                        print img_name

                        today = str(datetime.date.today())
                        self.saveImage(today, img_name, img_bytes)

                        # Reply with at most the top two face matches.
                        face_count = len(result)
                        for i in xrange(2):
                            face = result[i] if face_count > i else None
                            if face is not None:
                                actress = self.dao.find_one_actress_by_id(
                                    face.get("id"))
                                if bool(actress):
                                    self.sendImageMessage(
                                        sender, face, today + img_name,
                                        actress)

            # Postback from the like/unlike buttons of a previous reply.
            # Payload format: "<O|X>,<face id>,<image name>".
            if ("postback" in event and "payload" in event["postback"]):
                payload = event["postback"]["payload"]
                feedback = payload.split(",")
                if feedback[0] == "O":
                    ox = "like"
                    # Positive feedback: index the face for future searches.
                    file = self.images_root + feedback[2]
                    with open(file, "rb") as img_file:
                        self.aws.insert_index_face(feedback[1],
                                                   img_file.read())
                else:
                    ox = "unlike"

                self.dao.update_one_feedback_by_id(feedback[1], ox,
                                                   feedback[2])
                self.sendTextMessage(sender, "感謝回饋")

    def saveImage(self, today, img_name, img_bytes):
        # Persist the image under images_root/<today>/<img_name>.
        directory = self.images_root + today + "/"
        if not os.path.exists(directory):
            os.makedirs(directory)

        f = open(directory + img_name, 'wb')
        f.write(img_bytes)
        f.close()

    def sendTextMessage(self, sender, text):
        # Send a plain-text reply through the Graph API; no-op on empty text.
        if len(text) <= 0:
            return

        data = {"recipient": {"id": sender}, "message": {"text": text}}
        params = {"access_token": self.page_access_token}

        r = requests.post(self.api_url,
                          params=params,
                          data=json.dumps(data),
                          headers=self.api_headers)

    def sendImageMessage(self, sender, face, img_name, actress):
        # Build a generic-template card for one matched face, with
        # like/unlike postback buttons carrying "<O|X>,<id>,<img_name>".
        attachment = {
            "type": "template",
            "payload": {
                "template_type":
                "generic",
                "elements": [{
                    "title":
                    actress.get("name"),
                    "image_url":
                    actress.get("img"),
                    "subtitle":
                    "相似度: " + str(round(face.get("similarity"), 2)) + "%",
                    "default_action": {
                        "type":
                        "web_url",
                        # "url": "http://www.dmm.co.jp/mono/dvd/-/list/=/article=actress/id=" + face.get("id") + "/sort=date/",
                        "url":
                        "http://sp.dmm.co.jp/mono/list/index/shop/dvd/article/actress/id/"
                        + face.get("id") + "/sort/date",
                        # "url": "http://www.r18.com/videos/vod/movies/list/id=" + face.get("id") + "/sort=new/type=actress/",
                        "webview_height_ratio":
                        "compact"
                    },
                    "buttons": [
                        # {
                        #     "type": "web_url",
                        #     "url": "http://sukebei.nyaa.se/?page=search&term=" + actress.get("name"),
                        #     "title": "去找片"
                        # },
                        {
                            "type": "postback",
                            "title": "O 覺得像",
                            "payload": "O," + face.get("id") + "," + img_name
                        },
                        {
                            "type": "postback",
                            "title": "X 差很多",
                            "payload": "X," + face.get("id") + "," + img_name
                        }
                    ]
                }]
            }
        }

        data = {
            "recipient": {
                "id": sender
            },
            "message": {
                "attachment": attachment
            }
        }
        params = {"access_token": self.page_access_token}

        r = requests.post(self.api_url,
                          params=params,
                          data=json.dumps(data),
                          headers=self.api_headers)

    def sendTypingMessage(self, sender, action):
        # Toggle the Messenger typing indicator ("typing_on"/"typing_off").
        data = {"recipient": {"id": sender}, "sender_action": action}
        params = {"access_token": self.page_access_token}

        r = requests.post(self.api_url,
                          params=params,
                          data=json.dumps(data),
                          headers=self.api_headers)
Exemplo n.º 21
0
    def __init__(self):
        """Create the AWS helper and RethinkDB manager used by this object."""

        self.aws = AWS()
        self.rethinkdb_manager = RethinkDbManager()
Exemplo n.º 22
0
class ValorManager:
    def __init__(self):
        """Create the AWS helper and RethinkDB manager used by the valor manager."""

        self.aws = AWS()
        self.rethinkdb_manager = RethinkDbManager()

    def get_stack_name(self):
        """Return this EC2 instance's CloudFormation stack name.

        Looks the name up in the instance's aws:cloudformation:stack-name
        tag; returns None implicitly when the tag is absent.
        """
        ec2 = boto3.resource('ec2')

        meta_data = get_instance_metadata(timeout=0.5, num_retries=2)

        this_instance = ec2.Instance(meta_data['instance-id'])

        for tag in this_instance.tags:
            if 'aws:cloudformation:stack-name' in tag['Key']:
                return tag['Value']

    def get_efs_mount(self):
        """Return the FileSystemID output of this host's CloudFormation stack.

        Returns '' when the stack has no FileSystemID output.
        """
        cloudformation = boto3.resource('cloudformation')
        efs_stack = cloudformation.Stack(self.get_stack_name())

        # Keep the last matching output value, '' if none matched.
        file_system_id = ''
        for output in efs_stack.outputs:
            if output['OutputKey'] == 'FileSystemID':
                file_system_id = output['OutputValue']

        return file_system_id

    def get_valor_for_virtue(self, virtue_id):
        # Go through the list of valors and return the valor that has the specified
        # virtue running on it.
        for valor in self.list_valors():
            for virtue in valor['virtues']:
                if virtue == virtue_id:
                    return valor

    def get_available_valors(self, migration_source_valor_id=None):
        """List RUNNING valors that still have spare virtue capacity.

        When migration_source_valor_id is given, that valor is excluded
        from the result (it is the valor being migrated away from).
        """
        return [
            valor for valor in self.list_valors()
            if len(valor.get('virtues', [])) < MAX_VIRTUES_PER_VALOR
            and valor['state'] == 'RUNNING'
            and (not migration_source_valor_id
                 or valor['valor_id'] != migration_source_valor_id)
        ]

    def get_empty_valors(self):

        # Return valors that have no virtues on them
        return [
            valor for valor in self.list_valors() if len(valor['virtues']) == 0
        ]

    def create_and_launch_valor(self, subnet_id, security_group_id):
        """Create a valor in the given subnet/security group, launch it,
        and return its instance id.
        """

        valor_id = self.create_valor(subnet_id, security_group_id)

        self.launch_valor(valor_id)

        return valor_id

    def create_standby_valors(self):
        """Top the pool of available valors back up to NUM_STANDBY_VALORS."""

        available = self.get_available_valors()

        if len(available) > NUM_STANDBY_VALORS:
            # Already more than enough standby capacity; nothing to do.
            return

        aws = AWS()

        # Launch each missing valor on its own background thread so this
        # call returns without waiting for instances to boot.
        for _ in range(NUM_STANDBY_VALORS - len(available)):
            worker = threading.Thread(
                target=self.create_and_launch_valor,
                args=(
                    aws.get_subnet_id(),
                    aws.get_sec_group().id,
                ))
            worker.start()

    def get_empty_valor(self, migration_source_valor_id=None):
        """ Get and return an available valor node.
        If less than the standby number of valors exist then
        create more standby valors
        """

        candidates = self.get_available_valors(migration_source_valor_id)

        if candidates:
            # Pick a random available valor and ensure it is RUNNING.
            chosen = candidates[random.randint(0, len(candidates) - 1)]
            self.verify_valor_running(chosen['valor_id'])
            return chosen

        # No available valor: create and launch a fresh one, wait for it
        # to reach RUNNING, then return its database record.
        aws = AWS()
        valor_id = self.create_and_launch_valor(aws.get_subnet_id(),
                                                aws.get_sec_group().id)
        self.verify_valor_running(valor_id)
        return self.rethinkdb_manager.get_valor(valor_id)

    def list_valors(self):

        valors = self.rethinkdb_manager.list_valors()
        virtues = self.rethinkdb_manager.list_virtues()

        # Update each valor field with a virtues field.
        [valor.update({'virtues': []}) for valor in valors]

        # Update valors list with associated virtues for each valor
        for valor in valors:
            for virtue in virtues:
                if (valor['valor_id'] == virtue['valor_id']):
                    valor['virtues'].append(virtue['virtue_id'])

        return valors

    def create_valor(self, subnet, sec_group):
        """Create (but do not launch) a new valor EC2 instance.

        Concatenates the base/xenblanket/gaius/syslog-ng setup scripts
        from EFS into one cloud-init user-data payload, boots an instance
        from a fixed AMI, runs setup_valor() on it, and returns the
        new instance id.
        """

        # Base Setup
        with open('/mnt/efs/valor/deploy/compute/' + 'setup.sh', 'r') as f:
            base_setup_data = f.read()
        base_setup_data = base_setup_data.replace('${1}', self.get_efs_mount())

        # Xenblanket Setup
        with open('/mnt/efs/valor/deploy/compute/' + 'setup_xenblanket.sh',
                  'r') as f:
            xenblanket_setup_data = f.read()
        xenblanket_setup_data = xenblanket_setup_data.replace(
            '${1}', self.get_efs_mount())

        # Gaius Setup
        with open('/mnt/efs/valor/deploy/compute/' + 'setup_gaius.sh',
                  'r') as f:
            gaius_setup_data = f.read()

        # Syslog-ng Setup
        with open('/mnt/efs/valor/deploy/compute/' + 'setup_syslog_ng.sh',
                  'r') as f:
            syslog_ng_setup_data = f.read()

        # The scripts run in this order as a single user-data payload.
        user_data = base_setup_data + xenblanket_setup_data + \
                    gaius_setup_data + syslog_ng_setup_data

        valor_config = {
            'image_id': 'ami-0f9cf087c1f27d9b1',
            'inst_type': 't2.xlarge',
            'subnet_id': subnet,
            'key_name': 'starlab-virtue-te',
            'tag_key': 'Project',
            'tag_value': 'Virtue',
            'sec_group': sec_group,
            'inst_profile_name': '',
            'inst_profile_arn': '',
            'user_data': user_data,
        }

        instance = self.aws.instance_create(**valor_config)

        self.setup_valor(instance)

        return instance.id

    def setup_valor(self, instance):
        """Register a newly created valor with RethinkDB and the router,
        run its on-node setup, and wait for the instance to stop.

        Returns the instance id; the valor's DB state is set to 'STOPPED'
        once the instance has shut itself down.
        """

        router_ip = self.rethinkdb_manager.get_router()['address']

        valor = Valor(instance.id)

        valor.connect_with_ssh()

        self.rethinkdb_manager.add_valor(valor)

        # Add the valor node to the router
        RouterManager(router_ip).add_valor(valor)

        valor.setup(router_ip)

        # valor.verify_setup()

        # Setup ends with the instance powering off; wait for that and
        # refresh the cached instance attributes.
        instance.wait_until_stopped()
        instance.reload()

        self.rethinkdb_manager.set_valor(valor.aws_instance.id, 'state',
                                         'STOPPED')

        return instance.id

    def launch_valor(self, valor_id):
        """Start a stopped valor, mark it RUNNING and export NFS to it.

        Args:
            valor_id: AWS instance id of the valor to launch.

        Returns:
            The AWS instance id of the launched valor.
        """

        instance = self.aws.instance_launch(valor_id)

        # Block until the valor is reachable before marking it RUNNING.
        Valor(valor_id).connect_with_ssh()

        self.rethinkdb_manager.set_valor(valor_id, 'state', 'RUNNING')

        valor_ip = self.rethinkdb_manager.get_valor(valor_id)['address']

        # Add NFS export line for valor to access email preferences dir
        try:
            # BUG FIX: on Python 3 check_output() returns bytes, so
            # .strip("\n") raised TypeError and the export was silently
            # skipped (swallowed by the broad except below).
            # universal_newlines=True yields str on both Python 2 and 3.
            line = subprocess.check_output("grep mnt/ost /etc/exports",
                                           shell=True,
                                           universal_newlines=True).strip("\n")
            line_num = subprocess.check_output(
                "sed -n '/mnt\/ost/=' /etc/exports", shell=True,
                universal_newlines=True).strip("\n")

            # Remove current line and replace with updated export
            ret = subprocess.check_call(
                "sudo sed -i '{}d' /etc/exports".format(line_num), shell=True)
            assert ret == 0

            line += " {}(rw,sync,no_subtree_check)".format(valor_ip)
            ret = subprocess.check_call(
                'echo "{}" | sudo tee -a /etc/exports'.format(line),
                shell=True)
            assert ret == 0

            ret = subprocess.check_call(['sudo', 'exportfs', '-ra'])
            assert ret == 0

        except Exception as e:
            # Best-effort: a failed export is logged, not fatal.
            print("Failed to append to NFS exports with message: {}".format(e))

        return instance.id

    def verify_valor_running(self, valor_id):
        """Ensure the valor is RUNNING, launching or waiting as needed.

        STOPPED valors are launched; CREATING valors are polled every
        10 seconds up to a bounded number of attempts.

        Args:
            valor_id: AWS instance id of the valor to verify.

        Raises:
            Exception: on poll timeout or an unexpected valor state.
        """

        # Check the valor state and verify that it is 'RUNNING'
        valor_state = self.rethinkdb_manager.get_valor(valor_id)['state']

        valor_wait_timeout = 60
        valor_wait_count = 0

        while valor_state != 'RUNNING':

            if valor_state == 'STOPPED':
                self.launch_valor(valor_id)
                break

            elif valor_state == 'CREATING':
                # BUG FIX: the timeout check was a later elif, so it was
                # unreachable while the state stayed CREATING. Check it
                # here, before sleeping again.
                if valor_wait_count >= valor_wait_timeout:
                    # BUG FIX: the Exception was constructed but never
                    # raised, so timeouts were silently ignored.
                    raise Exception(
                        'ERROR: Timed out waiting for valor to reach '
                        '[RUNNING] state - current state is [{}]'.format(
                            valor_state))
                time.sleep(10)
                valor_state = self.rethinkdb_manager.get_valor(
                    valor_id)['state']
                valor_wait_count = valor_wait_count + 1

            else:
                # BUG FIX: was also constructed-but-never-raised.
                raise Exception(
                    'ERROR: Unexpected Error condition encountered while getting a '
                    'valor')

    def create_valor_pool(self, number_of_valors, subnet, sec_group):
        """Create a pool of valors, then launch every one of them.

        Args:
            number_of_valors: pool size (coerced with int()).
            subnet: subnet id for the new valors.
            sec_group: security group id for the new valors.

        Returns:
            List of AWS instance ids for the created valors.
        """
        new_valor_ids = [
            self.create_valor(subnet, sec_group)
            for _ in range(int(number_of_valors))
        ]

        # Launch only after the whole pool has been created.
        for new_valor_id in new_valor_ids:
            self.launch_valor(new_valor_id)

        return new_valor_ids

    def stop_valor(self, valor_id):
        """Stop an empty valor and remove its NFS export entry.

        Only valors with no virtues on them may be stopped.

        Args:
            valor_id: AWS instance id of the valor to stop.

        Returns:
            The AWS instance id of the stopped valor.

        Raises:
            Exception: if no valor has this id, or the valor still has
                virtues running on it.
        """

        valor_found = any(valor_id == valor['valor_id']
                          for valor in self.get_empty_valors())

        if valor_found:
            instance = self.aws.instance_stop(valor_id)

            self.rethinkdb_manager.set_valor(valor_id, 'state', 'STOPPED')

            valor_ip = self.rethinkdb_manager.get_valor(valor_id)['address']

            # Remove NFS export entry
            try:
                # BUG FIX: on Python 3 check_output() returns bytes, so
                # .strip("\n") raised TypeError and the export entry was
                # never removed. universal_newlines=True yields str on
                # both Python 2 and 3.
                line = subprocess.check_output(
                    "grep mnt/ost /etc/exports", shell=True,
                    universal_newlines=True).strip("\n")
                line_num = subprocess.check_output(
                    "sed -n '/mnt\/ost/=' /etc/exports",
                    shell=True, universal_newlines=True).strip("\n")

                # Remove current line and replace with updated export
                ret = subprocess.check_call(
                    "sudo sed -i '{}d' /etc/exports".format(line_num),
                    shell=True)
                assert ret == 0

                line = line.replace(
                    " {}(rw,sync,no_subtree_check)".format(valor_ip), "")
                ret = subprocess.check_call(
                    'echo "{}" | sudo tee -a /etc/exports'.format(line),
                    shell=True)
                assert ret == 0

                ret = subprocess.check_call(['sudo', 'exportfs', '-ra'])
                assert ret == 0

            except Exception as e:
                # Best-effort: a failed un-export is logged, not fatal.
                print("Failed to remove NFS export with message: {}".format(e))

            return instance.id

        else:
            virtues_on_valor = [
                valor['virtues'] for valor in self.list_valors()
                if valor['valor_id'] == valor_id
            ]
            # If no valors are found then the valor_id does not exist
            if len(virtues_on_valor) == 0:
                raise Exception(
                    'ERROR: No Valor exists with the specified valor_id {}'.
                    format(valor_id))
            else:
                raise Exception(
                    'ERROR: Valor currently has the following Virtue/s running on it: '
                    '{}'.format(virtues_on_valor))

    def destroy_valor(self, valor_id):
        """Destroy an empty valor and remove it from RethinkDB.

        Args:
            valor_id: AWS instance id of the valor to destroy.

        Returns:
            The id of the destroyed valor.

        Raises:
            Exception: if no valor has this id, or the valor still has
                virtues running on it.
        """
        is_empty = any(valor_id == empty_valor['valor_id']
                       for empty_valor in self.get_empty_valors())

        if not is_empty:
            virtues_on_valor = [
                valor['virtues'] for valor in self.list_valors()
                if valor['valor_id'] == valor_id
            ]
            # If no valors are found then the valor_id does not exist
            if not virtues_on_valor:
                raise Exception(
                    'ERROR: No Valor exists with the specified valor_id {}'.
                    format(valor_id))
            raise Exception(
                'ERROR: Valor currently has the following Virtue/s running on it: '
                '{}'.format(virtues_on_valor))

        # Top up the standby pool before taking this valor away.
        self.create_standby_valors()

        self.aws.instance_destroy(valor_id, block=False)

        self.rethinkdb_manager.remove_valor(valor_id)

        return valor_id

    def migrate_virtue(self, virtue_id, destination_valor_id):
        """Request migration of a virtue to another valor.

        Validates that the destination differs from the current valor and
        has capacity, then flags the migration by updating the relevant
        rows in the RethinkDB 'commands' table (the actual move is
        performed by whatever service watches that table).

        Args:
            virtue_id: id of the virtue to migrate.
            destination_valor_id: id of the target valor.

        Raises:
            Exception: if source and destination are the same valor, or
                the destination already holds MAX_VIRTUES_PER_VALOR.
        """

        virtue = self.rethinkdb_manager.get_virtue(virtue_id)

        # The virtue row stores its valor's address, not the valor id, so
        # look the current valor up by address.
        current_valor = rethinkdb.db('transducers').table('galahad').filter({
            'function':
            'valor',
            'address':
            virtue['address']
        }).run(self.rethinkdb_manager.connection).next()

        destination_valor = self.rethinkdb_manager.get_valor(
            destination_valor_id)

        if current_valor['valor_id'] == destination_valor_id:
            raise Exception(
                ('ERROR: Source valor [{0}] and Destination Valor [{1}] '
                 'are the same'.format(current_valor['valor_id'],
                                       destination_valor_id)))

        # Capacity check: count virtues already on the destination valor.
        virtues_on_dst_valor = rethinkdb.db('transducers').table(
            'galahad').filter({
                'function': 'virtue',
                'address': destination_valor['address']
            }).run(self.rethinkdb_manager.connection)

        dst_virtue_count = len(list(virtues_on_dst_valor))
        if (dst_virtue_count >= MAX_VIRTUES_PER_VALOR):
            raise Exception(('ERROR: Destination Valor has too many ({0})'
                             ' Virtues running on it'
                             ' to migrate.'.format(dst_virtue_count)))

        # Enable the migration command for this virtue on its current valor.
        rethinkdb.db("transducers").table("commands") \
            .filter({'valor_ip': current_valor['address'],
                     'virtue_id': virtue_id}) \
            .update({'valor_dest': destination_valor['address'],
                     'enabled': True}).run(self.rethinkdb_manager.connection)

        # Point the introspection transducer at the destination valor.
        rethinkdb.db('transducers').table('commands') \
            .filter({'virtue_id': virtue_id,
                     'transducer_id': 'introspection'}) \
            .update({"valor_id": destination_valor_id}).run(self.rethinkdb_manager.connection)

    def add_virtue(self,
                   valor_address,
                   valor_id,
                   virtue_id,
                   efs_path,
                   role_create=False):
        """Register a virtue on a valor and replenish the standby pool.

        Args:
            valor_address: IP address of the hosting valor.
            valor_id: id of the hosting valor.
            virtue_id: id of the virtue being added.
            efs_path: EFS path associated with the virtue.
            role_create: whether this add is part of role creation.

        Returns:
            Whatever the RethinkDB manager returns for the insert.
        """
        # Consuming capacity on a valor may drop the number of empty
        # valors below the standby threshold, so top the pool up first.
        self.create_standby_valors()

        result = self.rethinkdb_manager.add_virtue(valor_address, valor_id,
                                                   virtue_id, efs_path,
                                                   role_create)
        return result

    def list_virtues(self):
        """Return all virtue records known to RethinkDB."""
        return self.rethinkdb_manager.list_virtues()

    def auto_migration_start(self, migration_interval):
        """Enable automatic virtue migration at the given interval.

        migration_interval: interval between migrations (units are
        defined by the RethinkDB manager — presumably seconds; confirm).
        """
        self.rethinkdb_manager.auto_migration_start(migration_interval)

    def auto_migration_stop(self):
        """Disable automatic virtue migration."""
        self.rethinkdb_manager.auto_migration_stop()

    def auto_migration_status(self):
        """Return the current auto-migration status record."""
        return self.rethinkdb_manager.auto_migration_status()

    def is_auto_migration_on(self):
        """Return True if automatic virtue migration is enabled."""
        return self.rethinkdb_manager.is_auto_migration_on()
Exemplo n.º 23
0
def main():
    """
    Main data execution.

    Audit EC2 instance tags in every region (except BAD_REGIONS):
    instances whose 'Environment' or 'Product' tag is missing or not in
    the canonical lists are written to an HTML report. Exits with the
    number of offending instances as the status code.
    """

    # Variable to set exit code
    error_count = 0

    # Gather Region data and connect
    regions = boto.ec2.regions()
    for region in regions:
        if region.name in BAD_REGIONS:
            continue
        conn = AWS(region.name)
        connect = conn.connect_to_ec2()

        # Create table for bad data
        table = PrettyTable(
            ['Instance ID', 'Server Name', 'Environment', 'Product'])
        table.hrules = True
        table.format = True

        # Retrieve instance data
        reservations = connect.get_all_instances()
        if reservations:
            LOG.info("Instances found in region {}!".format(region.name))
            with open(HTML_FILE, 'a') as html:

                # Iterate through AWS instances, tag data
                for res in reservations:
                    for inst in res.instances:
                        # BUG FIX: the original looped over inst.tags,
                        # recomputing the same three values once per tag,
                        # used 'and not None' (always true), and raised
                        # NameError for instances with no tags at all.
                        # dict.get + 'or' covers missing and None values.
                        amazon_name = inst.tags.get('Name') or "missing name"
                        amazon_product = (inst.tags.get('Product')
                                          or "missing product")
                        amazon_environment = (inst.tags.get('Environment')
                                              or "missing environment")

                        # Check for bad tag values, and print those in table
                        bad_environment = check_data(amazon_environment,
                                                     CANONICAL_ENVIRONMENTS)
                        bad_product = check_data(amazon_product,
                                                 CANONICAL_PRODUCTS)
                        if bad_product is not None:
                            table.add_row(
                                [inst.id, amazon_name, "+", amazon_product])
                            error_count += 1
                        elif bad_environment is not None:
                            table.add_row([
                                inst.id, amazon_name, amazon_environment, "+"
                            ])
                            error_count += 1
                        # BUG FIX: the original 'else: break' aborted the
                        # audit of the remaining instances in a reservation
                        # as soon as one clean instance was found.

                # Print region name and coresponding data
                html.write("<h2> Region {}</h2>".format(region.name))
                html.write(table.get_html_string())
        else:
            LOG.info("There are no instances in region {}".format(region.name))

    # Prints closing html tags
    with open(HTML_FILE, 'a') as html_close:
        html_close.write('''
</body>
</html>
            ''')

    sys.exit(error_count)
Exemplo n.º 24
0
# Simulator for the weather master module: decode a hard-coded sensor
# payload and publish the result to AWS.
from weather_master import PackageReader
from aws import AWS

print("Start Master Module Simulator")

reader = PackageReader()

print("\nRxDone")

# Sample raw sensor payloads (byte lists). The first, shorter variant is
# kept for reference.
#payload = [0x0A, 0x05, 0x2E, 0x95, 0xF3, 0x71, 0x84, 0x00, 0x62, 0x11]
payload = [
    0x10, 0x05, 0x2E, 0x8F, 0xED, 0x8C, 0x8E, 0x00, 0x64, 0x17, 0x3F, 0x2C,
    0x23, 0x01, 0x89, 0x44
]

#10052E8FED8C8E0064173F2C23018944

# Show the payload as hex for comparison with the captured frame above.
print(bytes(payload).hex())

reader.read_package(payload, True)

# Publish the decoded data pack to AWS, then disconnect.
db = AWS()

db.connect()
db.publish_sensor_data(reader.data_pack, True)
db.disconnect()
Exemplo n.º 25
0
import os
import shutil
import sys
import time
import unittest

# local modules
from aws import AWS

# BUG FIX: 'logging' is used below but never imported in this example's
# import block, which makes the module raise NameError on import.
import logging

# Derive the logger name from the script filename (e.g. 'foo' for foo.py).
self = os.path.basename(sys.argv[0])
myName = os.path.splitext(self)[0]
log = logging.getLogger(myName)
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
log.setLevel(logging.DEBUG)

# Shared fixtures for the test cases below.
aws = AWS(logger=log)
test_bucket = 'af-example1'
test_instance_id = 'i-00d4bb1c2ee88275c'

@unittest.skip("Skipping Test01")
class Test01(unittest.TestCase):
    """Smoke tests for the AWS wrapper (currently skipped).

    Uses the module-level ``aws`` fixture and logger.
    """

    def test_get_buckets(self):
        # At least one S3 bucket should be visible to the credentials.
        buckets = aws.get_buckets()
        log.debug(buckets)
        self.assertGreater(len(buckets), 0)

    def test_get_instances(self):
        # Only checks that the call succeeds; the response is logged.
        response = aws.get_instances()
        log.debug(response)
Exemplo n.º 26
0
class InfrastructureTemplate():
    u""" AWS Cloudformation Infrastructure Template """
    def __init__(self):
        u""" Infrastructure Class Constructor.

        Ensures the supporting AWS resources exist (keypair, deployment
        bucket, server certificate), then builds a CloudFormation
        template: parameters, VPC, subnet, gateway, routing, network
        ACLs, a security group and a single EC2 instance. The
        commented-out resources at the end are a load-balanced
        autoscaling variant kept for reference.
        """
        self.aws = AWS()
        self.ami = AMI()
        self.ref_stack_id = Ref('AWS::StackId')
        self.ami_id = self.ami.minimal_linux_ami()

        # NOTE: Troposphere doesn't have a template feature to make KeyPairs
        #       So handle this ad-hoc for now.
        self.keypair_name = 'test-deploy-keypair'
        if self.keypair_doesnt_exist():
            self.create_keypair(self.keypair_name)

        # Deployment bucket: reuse an existing one by prefix, otherwise
        # create a bucket with a random 12-hex-char suffix.
        self.deployment_bucket_prefix = 'test-deploy-bucket-'
        self.deployment_bucket_name = '{}{}'.format(
            self.deployment_bucket_prefix,
            uuid.uuid4().hex[:12].lower())
        self.deployment_bucket_location = None
        if self.deploy_bucket_doesnt_exist():
            self.deployment_bucket_location = self.create_deploy_bucket(
                self.deployment_bucket_name)
        else:
            self.deployment_bucket_location = self.get_bucket_url(
                self.deployment_bucket_name)

        # Server certificate for TLS on the (commented-out) load balancer.
        self.server_certificate_name = 'test-deploy-certificate'
        self.server_certificate_arn = None
        if self.server_certificate_doesnt_exist():
            self.server_certificate_arn = self.upload_server_certificate()

        # ---- CloudFormation template: header and parameters ----
        self.template = Template()
        self.template.add_version('2010-09-09')
        self.template.add_description(
            'AWS Cloudformation Template for autoscaled, load balance controlled EC2 service'
        )

        self.template.add_parameter(
            Parameter('KeyName',
                      Description='Name of an existing EC2 KeyPair',
                      Default=self.keypair_name,
                      Type='String'))

        self.template.add_parameter(
            Parameter('AmiId',
                      Description='Lastest Minimal Linux AMI',
                      Default=self.ami_id,
                      Type='String'))

        self.template.add_parameter(
            Parameter('DeployBucketName',
                      Description='Name of the deployment_bucket',
                      Default=self.deployment_bucket_name,
                      Type='String'))

        self.template.add_parameter(
            Parameter('DeployBucketLocation',
                      Description='Location of the deployment_bucket',
                      Default=self.deployment_bucket_location,
                      Type='String'))

        self.template.add_parameter(
            Parameter('ServerCertificateArn',
                      Description='Certificate ARN for the Load Balancer',
                      Default=self.server_certificate_arn,
                      Type='String'))

        self.sshlocation = self.template.add_parameter(
            Parameter(
                'SSHLocation',
                Description=
                'The IP address range that can be used to SSH to the EC2 instances',
                Type='String',
                MinLength='9',
                MaxLength='18',
                Default='0.0.0.0/0',
                AllowedPattern=
                r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
                ConstraintDescription=(
                    "must be a valid IP CIDR range of the form x.x.x.x/x.")))

        # ---- Networking: VPC, subnet, internet gateway, routing ----
        self.vpc = self.template.add_resource(
            VPC('TestDeployVpc',
                CidrBlock='10.0.0.0/16',
                Tags=Tags(Application=self.ref_stack_id)))

        self.subnet = self.template.add_resource(
            Subnet('TestDeploySubnet',
                   VpcId=Ref(self.vpc),
                   CidrBlock='10.0.0.0/24',
                   Tags=Tags(Application=self.ref_stack_id)))

        self.gateway = self.template.add_resource(
            InternetGateway('TestDeployGateway',
                            Tags=Tags(Application=self.ref_stack_id)))

        self.gatewayattach = self.template.add_resource(
            VPCGatewayAttachment('AttachGateway',
                                 VpcId=Ref(self.vpc),
                                 InternetGatewayId=Ref(self.gateway)))

        self.route_table = self.template.add_resource(
            RouteTable('RouteTable',
                       VpcId=Ref(self.vpc),
                       Tags=Tags(Application=self.ref_stack_id)))

        self.route = self.template.add_resource(
            Route('Route',
                  DependsOn='AttachGateway',
                  GatewayId=Ref('TestDeployGateway'),
                  DestinationCidrBlock='0.0.0.0/0',
                  RouteTableId=Ref(self.route_table)))

        self.subnet_route_association = self.template.add_resource(
            SubnetRouteTableAssociation(
                'SubnetRouteTableAssociation',
                SubnetId=Ref(self.subnet),
                RouteTableId=Ref(self.route_table),
                DependsOn=['TestDeploySubnet', 'RouteTable']))

        # ---- Network ACL: inbound HTTP/HTTPS/SSH/ephemeral, outbound
        # HTTP/HTTPS/ephemeral, all open to 0.0.0.0/0 ----
        self.network_acl = self.template.add_resource(
            NetworkAcl('NetworkAcl',
                       VpcId=Ref(self.vpc),
                       Tags=Tags(Application=self.ref_stack_id)))

        self.inbound_private_http = self.template.add_resource(
            NetworkAclEntry('InboundHTTP',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='100',
                            Protocol='6',
                            PortRange=PortRange(To='80', From='80'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_private_http_alt = self.template.add_resource(
            NetworkAclEntry('InboundHTTPAlt',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='101',
                            Protocol='6',
                            PortRange=PortRange(To='8000', From='8000'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_private_https = self.template.add_resource(
            NetworkAclEntry('InboundHTTPS',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='102',
                            Protocol='6',
                            PortRange=PortRange(To='443', From='443'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_ssh = self.template.add_resource(
            NetworkAclEntry('InboundSSH',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='103',
                            Protocol='6',
                            PortRange=PortRange(To='22', From='22'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_response = self.template.add_resource(
            NetworkAclEntry('InboundResponsePorts',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='104',
                            Protocol='6',
                            PortRange=PortRange(To='65535', From='1024'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.outbound_http = self.template.add_resource(
            NetworkAclEntry('OutboundHTTP',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='100',
                            Protocol='6',
                            PortRange=PortRange(To='80', From='80'),
                            Egress='true',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.outbound_https = self.template.add_resource(
            NetworkAclEntry('OutboundHTTPS',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='101',
                            Protocol='6',
                            PortRange=PortRange(To='443', From='443'),
                            Egress='true',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.outbound_response = self.template.add_resource(
            NetworkAclEntry('OutboundResponsePorts',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='102',
                            Protocol='6',
                            PortRange=PortRange(To='65535', From='1024'),
                            Egress='true',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.subnet_network_association = self.template.add_resource(
            SubnetNetworkAclAssociation(
                'SubnetNetworkACLAssociation',
                SubnetId=Ref(self.subnet),
                NetworkAclId=Ref(self.network_acl),
                DependsOn=['TestDeploySubnet', 'NetworkAcl']))

        # ---- Instance security group and the EC2 instance itself ----
        self.instance_security_group = self.template.add_resource(
            SecurityGroup('InstanceSecurityGroup',
                          GroupDescription='Open all ports',
                          SecurityGroupIngress=[
                              SecurityGroupRule(IpProtocol='tcp',
                                                FromPort='22',
                                                ToPort='22',
                                                CidrIp='0.0.0.0/0'),
                              SecurityGroupRule(IpProtocol='tcp',
                                                FromPort='1024',
                                                ToPort='65535',
                                                CidrIp='0.0.0.0/0')
                          ],
                          SecurityGroupEgress=[
                              SecurityGroupRule(IpProtocol='tcp',
                                                FromPort='1',
                                                ToPort='65535',
                                                CidrIp='0.0.0.0/0')
                          ],
                          VpcId=Ref(self.vpc)))

        # UserData bootstraps cfn-init/cfn-signal and serves the current
        # directory on port 8000 via SimpleHTTPServer.
        self.instance = self.template.add_resource(
            Instance(
                'TestDeployInstance',
                ImageId=Ref('AmiId'),
                InstanceType='t2.micro',
                KeyName=Ref('KeyName'),
                NetworkInterfaces=[
                    NetworkInterfaceProperty(
                        GroupSet=[Ref('InstanceSecurityGroup')],
                        AssociatePublicIpAddress='true',
                        DeviceIndex='0',
                        DeleteOnTermination='true',
                        SubnetId=Ref('TestDeploySubnet'))
                ],
                UserData=Base64(
                    Join('', [
                        "#!/bin/bash\n",
                        "apt-get update\n",
                        "apt-get -y install python python-pip python-setuptools\n",
                        "mkdir aws-cfn-bootstrap-latest\n",
                        "curl https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz | tar xz -C aws-cfn-bootstrap-latest --strip-components 1\n",
                        "easy_install aws-cfn-bootstrap-latest\n",
                        "/usr/local/bin/cfn-init --stack ",
                        {
                            "Ref": "AWS::StackName"
                        },
                        " --resource TestDeployInstance",
                        " --region ",
                        {
                            "Ref": "AWS::Region"
                        },
                        "\n",
                        "/usr/local/bin/cfn-signal --exit-code $? '",
                        {
                            "Ref": "WaitHandle"
                        },
                        "'\n"
                        "\n",
                        "python -m SimpleHTTPServer 8000 2>&1 >/dev/null &\n",
                    ])),
                DependsOn=['InstanceSecurityGroup', 'TestDeploySubnet'],
                Tags=Tags(Application=self.ref_stack_id)))

        # self.load_balancer_security_group = self.template.add_resource(
        #     SecurityGroup('LoadBalancerSecurityGroup',
        #                   GroupDescription='Open all ports',
        #                   SecurityGroupIngress=[
        #                       SecurityGroupRule(
        #                           IpProtocol='tcp',
        #                           FromPort='1',
        #                           ToPort='65535',
        #                           CidrIp='0.0.0.0/0')],
        #                   SecurityGroupEgress=[
        #                       SecurityGroupRule(
        #                           IpProtocol='tcp',
        #                           FromPort='1',
        #                           ToPort='65535',
        #                           CidrIp='0.0.0.0/0')],
        #                   VpcId=Ref(self.vpc)))

        # self.launch_configuration = self.template.add_resource(
        #     LaunchConfiguration('LaunchConfiguration',
        #                         AssociatePublicIpAddress=True,
        #                         UserData=Base64(Join('', [
        #                             "#!/bin/bash\n",
        #                             "sudo pip install ansible\n",
        #                             "sudo pip install SimpleHTTPServer\n",
        #                             "python -m SimpleHTTPServer 8000 2>&1 >/dev/null &\n",
        #                             "cfn-signal -e 0",
        #                             "    --resource AutoScalingGroup",
        #                             "    --stack ", Ref("AWS::StackName"),
        #                             "    --region ", Ref("AWS::Region"), "\n"
        #                         ])),
        #                         ImageId=Ref('AmiId'),
        #                         KeyName=Ref('KeyName'),
        #                         BlockDeviceMappings=[
        #                             ec2.BlockDeviceMapping(
        #                                 DeviceName="/dev/sda1",
        #                                 Ebs=ec2.EBSBlockDevice(
        #                                     VolumeSize="8"
        #                                 )
        #                             )
        #                         ],
        #                         SecurityGroups=[Ref('InstanceSecurityGroup')],
        #                         InstanceType="t2.micro",
        #                         DependsOn='InstanceSecurityGroup'))

        # self.load_balancer = self.template.add_resource(
        #     LoadBalancer(
        #         "LoadBalancer",
        #         ConnectionDrainingPolicy=elb.ConnectionDrainingPolicy(
        #             Enabled=True,
        #             Timeout=120,
        #         ),
        #         Subnets=[Ref('TestDeploySubnet')],
        #         HealthCheck=elb.HealthCheck(
        #             Target='HTTP:8000/',
        #             HealthyThreshold='10',
        #             UnhealthyThreshold='10',
        #             Interval='300',
        #             Timeout='60'
        #         ),
        #         Listeners=[
        #             elb.Listener(
        #                 LoadBalancerPort='443',
        #                 InstancePort='8000',
        #                 Protocol='HTTPS',
        #                 InstanceProtocol='HTTP',
        #                 SSLCertificateId=Ref('ServerCertificateArn')
        #             ),
        #             elb.Listener(
        #                 LoadBalancerPort='22',
        #                 InstancePort='22',
        #                 Protocol='TCP',
        #                 InstanceProtocol='TCP'
        #             )
        #         ],
        #         CrossZone=True,
        #         SecurityGroups=[Ref('LoadBalancerSecurityGroup')],
        #         LoadBalancerName='api-lb',
        #         Scheme='internet-facing',
        #         DependsOn=['LoadBalancerSecurityGroup',
        #                    'TestDeploySubnet']))

        # self.auto_scaling_group = self.template.add_resource(
        #     AutoScalingGroup(
        #         "AutoscalingGroup",
        #         DesiredCapacity=1,
        #         Tags=[
        #             Tag("Environment", 'EnvType', True)
        #         ],
        #         LaunchConfigurationName=Ref(self.launch_configuration),
        #         MinSize=1,
        #         MaxSize=2,
        #         VPCZoneIdentifier=[Ref(self.subnet)],
        #         LoadBalancerNames=[Ref(self.load_balancer)],
        #         HealthCheckType='EC2',
        #         UpdatePolicy=UpdatePolicy(
        #             AutoScalingReplacingUpdate=AutoScalingReplacingUpdate(
        #                 WillReplace=True,
        #             ),
        #             AutoScalingRollingUpdate=AutoScalingRollingUpdate(
        #                 PauseTime='PT5M',
        #                 MinInstancesInService="1",
        #                 MaxBatchSize='1',
        #                 WaitOnResourceSignals=True
        #             )
        #         )
        #     ))

    @staticmethod
    def get_bucket_url(bucket_name):
        u""" Generates the bucket location """
        return 'https://{}.s3.amazonaws.com/'.format(bucket_name)

    def keypair_doesnt_exist(self):
        u""" Check to see if EC2 keypair exists """
        ec2_client = self.aws.get_client('ec2')
        try:
            # describe_key_pairs raises ClientError when the key is absent.
            ec2_client.describe_key_pairs(KeyNames=[self.keypair_name])
            return False
        except ClientError:
            return True

    def create_keypair(self, keypair_name):
        u""" Create an EC2 keypair and save the private key locally.

        The key material is echoed to stdout and written to
        ansible/test-deploy.pem. Exits the process on AWS errors.
        """
        client = self.aws.get_client('ec2')
        try:
            res = client.create_key_pair(KeyName=keypair_name)
            print(res['KeyMaterial'])
            # BUG FIX: use a context manager so the .pem file handle is
            # closed (and the key material flushed) deterministically
            # instead of leaking via open(...).write(...).
            with open('ansible/test-deploy.pem', 'w') as pem_file:
                pem_file.write(res['KeyMaterial'])
        except ClientError as ex:
            sys.exit(ex)

    def deploy_bucket_doesnt_exist(self):
        u""" Check to see if the deployment bucket exists.

        Returns False (and records the existing bucket's name) when a
        bucket with the deployment prefix is found, True otherwise.
        Exits the process on AWS errors.
        """
        client = self.aws.get_client('s3')
        try:
            res = client.list_buckets()
            for bucket in res['Buckets']:
                if self.deployment_bucket_prefix in bucket['Name']:
                    # BUG FIX: attribute was misspelled
                    # 'delpoyment_bucket_name', so the existing bucket's
                    # name was never recorded and later code kept using
                    # the freshly generated (nonexistent) bucket name.
                    self.deployment_bucket_name = bucket['Name']
                    return False
            return True
        except ClientError as ex:
            sys.exit(ex)

    def create_deploy_bucket(self, bucket_name):
        u""" Create the private S3 bucket used for deployment payloads.

        Returns the bucket's Location; exits the process on AWS error.
        """
        s3_client = self.aws.get_client('s3')
        region = self.aws.session.region_name
        try:
            response = s3_client.create_bucket(
                ACL='private',
                Bucket=bucket_name,
                CreateBucketConfiguration={'LocationConstraint': region})
            print('Create Bucket: {}'.format(response))
            return response['Location']
        except ClientError as ex:
            sys.exit(ex)

    def server_certificate_doesnt_exist(self):
        u""" Return True when the IAM server certificate is missing.

        On a match, the certificate's ARN is cached on the instance.
        """
        iam = self.aws.get_client('iam')
        listing = iam.list_server_certificates()
        matches = [
            cert for cert in listing['ServerCertificateMetadataList']
            if self.server_certificate_name in cert['ServerCertificateName']
        ]
        if matches:
            self.server_certificate_arn = matches[0]['Arn']
            return False
        return True

    def upload_server_certificate(self):
        u""" Upload a local TLS certificate/key pair to AWS IAM.

        Reads cert.pem and key.pem from the working directory,
        registers them as a server certificate, and returns the new
        certificate's ARN. Exits the process on AWS error.
        """
        client = self.aws.get_client('iam')
        # Context managers guarantee the PEM file handles are closed
        # (the original leaked both handles from bare open().read()).
        with open('cert.pem', 'r') as cert_file:
            certificate = cert_file.read()
        with open('key.pem', 'r') as key_file:
            private_key = key_file.read()
        try:
            res = client.upload_server_certificate(
                ServerCertificateName=self.server_certificate_name,
                CertificateBody=certificate,
                PrivateKey=private_key)
            from pprint import pprint as pp
            pp('Uploaded: {}'.format(res))
            return res['ServerCertificateMetadata']['Arn']
        except ClientError as ex:
            print(ex)
            sys.exit()

    def cleanup(self):
        u""" Cleanup static resources """
        # Tears down everything created outside CloudFormation, in the
        # same order it was created: deployment bucket(s), EC2 keypair,
        # then the IAM server certificate.
        self.destroy_deploy_bucket()
        self.destroy_ec2_keypair(self.keypair_name)
        self.delete_server_certificate(self.server_certificate_name)

    def destroy_deploy_bucket(self):
        u""" Destroy static deployment bucket(s).

        Empties and deletes every bucket whose name carries the
        deployment prefix. Buckets not created by this tool are left
        untouched.
        """
        client = self.aws.get_client('s3')
        buckets = client.list_buckets()
        for bucket in buckets['Buckets']:
            # BUG FIX: the original iterated (and deleted) *every* bucket
            # in the account; only deploy-prefixed buckets belong to us.
            if self.deployment_bucket_prefix not in bucket['Name']:
                continue
            objects = client.list_objects_v2(Bucket=bucket['Name'])
            while True:
                if 'Contents' not in objects:
                    break
                for s3_object in objects['Contents']:
                    try:
                        client.delete_object(Bucket=bucket['Name'],
                                             Key=s3_object['Key'])
                    except ClientError as ex:
                        print(ex)
                        continue
                if objects['IsTruncated']:
                    # BUG FIX: the next page is addressed by
                    # 'NextContinuationToken' (not 'ContinuationToken'),
                    # and boto3 client methods take keyword args only.
                    token = objects['NextContinuationToken']
                    objects = client.list_objects_v2(Bucket=bucket['Name'],
                                                     ContinuationToken=token)
                else:
                    break
            # Bucket must be empty by now; delete it.
            result = client.delete_bucket(Bucket=bucket['Name'])
            print(result)

    def destroy_ec2_keypair(self, keypair):
        u""" Delete the named EC2 keypair, logging (not raising) AWS errors. """
        ec2 = self.aws.get_client('ec2')
        try:
            ec2.delete_key_pair(KeyName=keypair)
        except ClientError as error:
            print(error)

    def delete_server_certificate(self, certificate_name):
        u""" Remove the named server certificate from IAM.

        AWS errors are printed rather than raised so cleanup continues.
        """
        iam = self.aws.get_client('iam')
        try:
            iam.delete_server_certificate(
                ServerCertificateName=certificate_name)
        except ClientError as error:
            print(error)

    def print_template(self, output='yaml'):
        u""" Print the Cloudformation template to stdout (YAML or JSON). """
        if 'yaml' in output:
            print(self.template.to_yaml())
        else:
            print(self.template.to_json())

    def generate_template(self, output='yaml'):
        u""" Return the Cloudformation template body as a string. """
        body = (self.template.to_yaml() if 'yaml' in output
                else self.template.to_json())
        return body

    def prepare_payload(self):
        u""" Collect all payload files in a zip archive.

        Rebuilds test-deploy-files.zip from the ansible/ directory,
        first removing any archive left over from a previous run.
        """
        if os.path.isfile('test-deploy-files.zip'):
            os.remove('test-deploy-files.zip')
        # Context manager guarantees the archive is flushed and closed
        # even if zipdir raises (original relied on a manual close()).
        with zipfile.ZipFile('test-deploy-files.zip', 'w',
                             zipfile.ZIP_DEFLATED) as zipf:
            self.zipdir('ansible/', zipf)

    @staticmethod
    def zipdir(path, zipf):
        u""" Put all the contents of a directory into a zipfile """
        for root, _, files in os.walk(path):
            for file in files:
                zipf.write(os.path.join(root, file))

    def deliver_payload(self, bucket_name):
        u""" Put the necessary files into S3 """
        client = self.aws.get_client('s3')
        # NOTE(review): the S3 object key is spelled 'test-delpoy-files.zip'
        # (transposed letters) while the local archive is named
        # 'test-deploy-files.zip'. Confirm whether downstream consumers
        # expect the misspelled key before correcting it.
        res = client.upload_file('test-deploy-files.zip',
                                 bucket_name,
                                 Key='test-delpoy-files.zip')
        from pprint import pprint as pp
        # NOTE(review): boto3's upload_file returns None, so this prints
        # 'None' -- presumably intended only as a progress marker.
        pp(res)
Exemplo n.º 27
0
class LoRaMaster(LoRa):
    u""" Continuous-receive LoRa node that forwards accepted packets to AWS.

    NOTE(review): relies on module-level ``reader``, ``BOARD``, ``MODE``
    and ``AWS`` objects defined elsewhere in this file -- confirm they
    are in scope before use.
    """

    def __init__(self, verbose=False):
        u""" Put the radio into standby and create the AWS publisher.

        BUG FIX: this method was declared as ``__init`` (missing the
        trailing underscores, so Python never invoked it as the
        constructor) and called ``super(LoRaRcvCont, ...)`` with the
        wrong class name plus an undefined ``verbose``. ``verbose`` is
        now an explicit parameter defaulting to False.
        """
        super(LoRaMaster, self).__init__(verbose)
        self.set_mode(MODE.STDBY)
        #self.set_dio_mapping([0] * 6)
        self.db = AWS()

    def on_rx_done(self):
        u""" RX-done IRQ: read the packet, then publish it if accepted. """
        BOARD.led_on()

        self.clear_irq_flags(RxDone=1)
        reader.read_package(self.read_payload(nocheck=True), True)

        self.set_mode(MODE.SLEEP)

        self.reset_ptr_rx()
        BOARD.led_off()
        self.set_mode(MODE.RXCONT)

        if (reader.is_accepted):
            # Connect per-packet so the AWS session stays short-lived.
            self.db.connect()
            self.db.publish_sensor_data(reader.data_pack, True)
            self.db.disconnect()

    def on_txdone(self):
        u""" Debug trace for TX-done interrupts. """
        print("\nTxDone")
        print(self.get_irq_flags())

    def on_cad_done(self):
        u""" Debug trace for channel-activity-detection interrupts. """
        print("\non_CadDone")
        print(self.get_irq_flags())

    def on_rx_timeout(self):
        u""" On RX timeout, cycle the radio back into continuous receive. """
        print("\non_RxTimeout")
        print(self.get_irq_flags())
        time.sleep(.5)
        self.set_mode(MODE.SLEEP)
        self.reset_ptr_rx()
        self.set_mode(MODE.RXCONT)

    def on_valid_header(self):
        u""" Debug trace for valid-header interrupts. """
        print("\non_ValidHeader")
        print(self.get_irq_flags())

    def on_payload_crc_error(self):
        u""" Debug trace for payload CRC errors. """
        print("\non_PayloadCrcError")
        print(self.get_irq_flags())

    def on_fhss_change_channel(self):
        u""" Debug trace for FHSS channel-change interrupts. """
        print("\non_Fhss_changeChannel")
        print(self.get_irq_flags())

    def print_payload(self, payload):
        u""" Pretty-print a raw payload's checksum byte and command byte. """
        chex_sum = payload[0]
        cmd = payload[1]

        print("chex_sum: \t{}".format(chex_sum))
        print("payload len: \t{}".format(len(payload)))

        # First byte is expected to carry the total payload length.
        if (chex_sum == len(payload)):
            print("payload of correct length")
        else:
            print("payload of incorrect length")
        print("command: \t{}".format(cmd))

    def start(self):
        u""" Enter continuous receive and report link stats forever. """
        self.db = AWS()
        self.reset_ptr_rx()
        self.set_mode(MODE.RXCONT)

        while True:
            time.sleep(.5)
            rssi_value = self.get_rssi_value()
            snr_value = self.get_pkt_snr_value()
            status = self.get_modem_status()
            sys.stdout.flush()
            sys.stdout.write("\r%d %d %d %d" %
                             (rssi_value, snr_value, status['rx_ongoing'],
                              status['modem_clear']))
Exemplo n.º 28
0
            print("Unlocking Door")
            lock.unlock_door()
            print("Door Locked")

        # if audio is set to new in Firebase, download the latest audio file and play it via the speaker
        if audio == "new":
            fb.get_storage()
            os.system("omxplayer audioVisitor.mp3")
            fb.update_data({'doorbell/audio/state': 'waiting'})


if __name__ == '__main__':
    # Firebase class instance
    fb = Firebase()
    # AWS class instance
    aws = AWS(fb)
    # NewFace class instance
    nf = NewFace(fb)
    # Solenoid Lock class instance
    lock = Solenoid()
    # Face Check class instance
    fc = FaceCheck()
    # Doorbell button class instance
    bb = BellButton(aws, nf, switch_face_check_on, switch_face_check_off)
    # Start the BellButton Thread class
    bb.start()
    # Start the Face Check Thread class
    fc.start()
    # Turn on face check
    switch_face_check_on()
    # Run the listen function
Exemplo n.º 29
0
class ETL:
    u""" Scrapes DMM actress rankings/listings into the DAO datastore and
    indexes their face images with AWS.

    NOTE(review): this is Python 2 code (print statements, urllib2,
    ur'' literals) and depends on module-level ``requests``, ``bs``
    (BeautifulSoup), ``re``, ``urllib2`` and ``datetime`` imports
    defined elsewhere in this file.
    """

    def __init__(self):
        # Data-access object and AWS face-index client shared by all jobs.
        self.dao = DAO()
        self.aws = AWS()

    def check_monthly_ranking(self):
        u""" Import any unrecorded actresses from the monthly top-100 ranking. """
        # The ranking page is split into five blocks of 20 entries each.
        intervals = ["1_20", "21_40", "41_60", "61_80", "81_100"]
        for interval in intervals:
            url = "http://www.dmm.co.jp/mono/dvd/-/ranking/=/term=monthly/mode=actress/rank=" + interval
            r = requests.get(url)
            soup = bs(r.text)
            actresses = soup.find_all("td", {"class": "bd-b"})
            for actress in actresses:
                actress_a = actress.find("a")
                # The actress id is embedded in the anchor's href.
                pattern = re.compile(
                    "/mono/dvd/-/list/=/article=actress/id=(.*)/")
                match = pattern.search(actress_a.get("href"))
                actress_id = match.group(1)

                detail = self.dao.find_one_actress_by_id(actress_id)
                # Only insert actresses never fully recorded before.
                if detail is None or detail.get("name") is None:
                    img = actress_a.find("img")
                    # Dropping 'medium/' yields the full-size image URL.
                    actress_img = img.get("src").replace('medium/', '')
                    actress_name = img.get("alt").encode('utf-8')
                    print actress_name

                    self.dao.update_one_info_by_actress({
                        "id": actress_id,
                        "name": actress_name,
                        "img": actress_img
                    })
                    self.aws.insert_index_face(
                        actress_id,
                        urllib2.urlopen(actress_img).read())

    def check_new_actress(self):
        u""" Import newly listed actresses from both DMM landing pages. """
        self.get_new_actress("http://actress.dmm.co.jp/-/top/",
                             "act-box-125 group", 1,
                             "/-/detail/=/actress_id=(.*)/")
        self.get_new_actress("http://www.dmm.co.jp/mono/dvd/-/actress/",
                             "act-box-100 group mg-b20", 0,
                             "/mono/dvd/-/list/=/article=actress/id=(.*)/")

    def get_new_actress(self, url, cssClass, index, hrefPattern):
        u""" Scrape one listing page and record unknown actresses.

        url         -- page to fetch
        cssClass    -- CSS class of the <ul> holding the actress boxes
        index       -- which matching <ul> on the page to use
        hrefPattern -- regex extracting the actress id from each link
        """
        r = requests.get(url)
        soup = bs(r.text)

        act_box = soup.find_all("ul", {"class": cssClass})
        actresses = act_box[index].find_all("a")
        for actress in actresses:
            pattern = re.compile(hrefPattern)
            match = pattern.search(actress.get("href"))
            actress_id = match.group(1)

            detail = self.dao.find_one_actress_by_id(actress_id)
            # Only insert actresses never fully recorded before.
            if detail is None or detail.get("name") is None:
                actress_img = actress.find("img").get("src").replace(
                    'medium/', '')
                actress_name = actress.text.encode('utf-8')
                print actress_name

                self.dao.update_one_info_by_actress({
                    "id": actress_id,
                    "name": actress_name,
                    "img": actress_img
                })
                self.aws.insert_index_face(actress_id,
                                           urllib2.urlopen(actress_img).read())

    def check_new_works(self):
        u""" Index today's new DVD releases for already-known actresses. """
        now = datetime.datetime.now()
        year = str(now.year)
        month = str(now.month)
        day = str(now.day)
        url = "http://www.dmm.co.jp/mono/dvd/-/calendar/=/month=" + month + "/year=" + year + "/day=" + day + "-" + day + "/"
        r = requests.get(url)
        soup = bs(r.text)
        print datetime.date.today()

        cal = soup.find("table", {"id": "monocal"})
        works_list = cal.find_all("tr")
        if len(works_list) == 0:
            return

        for works in works_list:
            actress_tag = works.find("td", {"class": "info-01"})
            # '----' marks calendar rows without a credited actress.
            if actress_tag is None or actress_tag.text == "----":
                continue

            pattern = re.compile("/mono/dvd/-/list/=/article=actress/id=(.*)/")
            match = pattern.search(actress_tag.find("a").get("href"))
            actress_id = match.group(1)
            # Only track works for actresses already in the datastore.
            if self.dao.find_one_actress_by_id(actress_id) is None:
                continue

            title_tag = works.find("td", {"class": "title-monocal"})
            title = title_tag.find("a")
            title_name = title.text
            # Skip limited editions / outlet reissues / Blu-ray duplicates.
            pattern = re.compile(
                ur"(^(【数量限定】|【DMM限定】|【DMM限定販売】|【アウトレット】|【特選アウトレット】)|(ブルーレイディスク)$)",
                re.UNICODE)
            match = re.search(pattern, title_name)
            if match:
                continue

            title_url = "http://www.dmm.co.jp" + title.get("href")
            detail = self.get_works_detail(title_url, actress_id)
            if detail is None:
                continue
            else:
                print detail.get("img")
                self.aws.insert_index_face(
                    detail.get("id"),
                    urllib2.urlopen(detail.get("img")).read())

    def get_works_detail(self, url, actress_id):
        u""" Fetch a work's detail page; return its cover info when new.

        Returns {"id": actress_id, "img": cover_url} for a solo work
        not yet recorded for this actress, otherwise None.
        """
        r = requests.get(url)
        soup = bs(r.text)

        sample = soup.find("div", {"class": "tx10 pd-3 lh4"})
        if sample is None:
            return

        # No Image
        a_tag = sample.find("a")
        if a_tag is None:
            return

        print a_tag.get('href')

        performer = soup.find("span", {"id": "performer"})
        performer_a_tag = performer.find_all("a")
        # Only index solo works (exactly one credited performer).
        if len(performer_a_tag) == 1:
            pattern = re.compile("/mono/dvd/-/detail/=/cid=(.*)/")
            match = pattern.search(url)
            cid = match.group(1)
            print cid

            project_works = self.dao.find_one_works_by_id(actress_id)
            print project_works

            # Skip if this cid is already recorded among her works.
            if bool(project_works) and any(
                    cid in s for s in project_works.get("works")):
                return
            else:
                self.dao.update_one_works_by_id(actress_id, cid)
                self.get_sample(soup, actress_id)
                return {"id": actress_id, "img": a_tag.get('href')}
        else:
            return

    def get_sample(self, soup, actress_id):
        u""" Index every full-size sample image found on a detail page. """
        sample_image = soup.find("div", {"id": "sample-image-block"})
        if sample_image is None:
            return

        sample_head = soup.find("div", {"class": "headline mg-b10 lh3"})
        sample_small = sample_head.find("span", {"class": "nw"})
        if sample_small is not None:
            return

        # Rewrite thumbnail filenames ('-<n>') to full-size ('jp-<n>').
        images = map(lambda tag: re.sub(r'-(\d+)', r'jp-\1', tag.get("src")),
                     sample_image.find_all("img"))
        for image in images:
            print image
            self.aws.insert_index_face(actress_id,
                                       urllib2.urlopen(image).read())
Exemplo n.º 30
0
    def __init__(self):
        u""" Infrastructure Class Contructor """
        # AWS session helpers and the latest minimal Linux AMI id.
        self.aws = AWS()
        self.ami = AMI()
        self.ref_stack_id = Ref('AWS::StackId')
        self.ami_id = self.ami.minimal_linux_ami()

        # NOTE: Troposphere doesn't have a template feature to make KeyPairs
        #       So handle this ad-hoc for now.
        self.keypair_name = 'test-deploy-keypair'
        if self.keypair_doesnt_exist():
            self.create_keypair(self.keypair_name)

        # Deployment bucket: generate a candidate name with a random
        # 12-hex-char suffix; create it only when no prefixed bucket exists.
        self.deployment_bucket_prefix = 'test-deploy-bucket-'
        self.deployment_bucket_name = '{}{}'.format(
            self.deployment_bucket_prefix,
            uuid.uuid4().hex[:12].lower())
        self.deployment_bucket_location = None
        if self.deploy_bucket_doesnt_exist():
            self.deployment_bucket_location = self.create_deploy_bucket(
                self.deployment_bucket_name)
        else:
            self.deployment_bucket_location = self.get_bucket_url(
                self.deployment_bucket_name)

        # Upload the TLS server certificate once and cache its ARN.
        self.server_certificate_name = 'test-deploy-certificate'
        self.server_certificate_arn = None
        if self.server_certificate_doesnt_exist():
            self.server_certificate_arn = self.upload_server_certificate()

        # --- CloudFormation template assembly (troposphere) ---
        self.template = Template()
        self.template.add_version('2010-09-09')
        self.template.add_description(
            'AWS Cloudformation Template for autoscaled, load balance controlled EC2 service'
        )

        # Template parameters, defaulted from the static resources above.
        self.template.add_parameter(
            Parameter('KeyName',
                      Description='Name of an existing EC2 KeyPair',
                      Default=self.keypair_name,
                      Type='String'))

        self.template.add_parameter(
            Parameter('AmiId',
                      Description='Lastest Minimal Linux AMI',
                      Default=self.ami_id,
                      Type='String'))

        self.template.add_parameter(
            Parameter('DeployBucketName',
                      Description='Name of the deployment_bucket',
                      Default=self.deployment_bucket_name,
                      Type='String'))

        self.template.add_parameter(
            Parameter('DeployBucketLocation',
                      Description='Location of the deployment_bucket',
                      Default=self.deployment_bucket_location,
                      Type='String'))

        self.template.add_parameter(
            Parameter('ServerCertificateArn',
                      Description='Certificate ARN for the Load Balancer',
                      Default=self.server_certificate_arn,
                      Type='String'))

        self.sshlocation = self.template.add_parameter(
            Parameter(
                'SSHLocation',
                Description=
                'The IP address range that can be used to SSH to the EC2 instances',
                Type='String',
                MinLength='9',
                MaxLength='18',
                Default='0.0.0.0/0',
                AllowedPattern=
                r"(\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})/(\d{1,2})",
                ConstraintDescription=(
                    "must be a valid IP CIDR range of the form x.x.x.x/x.")))

        # --- Networking: VPC, subnet, gateway, routing ---
        self.vpc = self.template.add_resource(
            VPC('TestDeployVpc',
                CidrBlock='10.0.0.0/16',
                Tags=Tags(Application=self.ref_stack_id)))

        self.subnet = self.template.add_resource(
            Subnet('TestDeploySubnet',
                   VpcId=Ref(self.vpc),
                   CidrBlock='10.0.0.0/24',
                   Tags=Tags(Application=self.ref_stack_id)))

        self.gateway = self.template.add_resource(
            InternetGateway('TestDeployGateway',
                            Tags=Tags(Application=self.ref_stack_id)))

        self.gatewayattach = self.template.add_resource(
            VPCGatewayAttachment('AttachGateway',
                                 VpcId=Ref(self.vpc),
                                 InternetGatewayId=Ref(self.gateway)))

        self.route_table = self.template.add_resource(
            RouteTable('RouteTable',
                       VpcId=Ref(self.vpc),
                       Tags=Tags(Application=self.ref_stack_id)))

        # Default route to the internet through the gateway.
        self.route = self.template.add_resource(
            Route('Route',
                  DependsOn='AttachGateway',
                  GatewayId=Ref('TestDeployGateway'),
                  DestinationCidrBlock='0.0.0.0/0',
                  RouteTableId=Ref(self.route_table)))

        self.subnet_route_association = self.template.add_resource(
            SubnetRouteTableAssociation(
                'SubnetRouteTableAssociation',
                SubnetId=Ref(self.subnet),
                RouteTableId=Ref(self.route_table),
                DependsOn=['TestDeploySubnet', 'RouteTable']))

        # --- Network ACL: allow HTTP(S)/SSH in, HTTP(S) out, plus
        #     ephemeral response ports in both directions ---
        self.network_acl = self.template.add_resource(
            NetworkAcl('NetworkAcl',
                       VpcId=Ref(self.vpc),
                       Tags=Tags(Application=self.ref_stack_id)))

        self.inbound_private_http = self.template.add_resource(
            NetworkAclEntry('InboundHTTP',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='100',
                            Protocol='6',
                            PortRange=PortRange(To='80', From='80'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_private_http_alt = self.template.add_resource(
            NetworkAclEntry('InboundHTTPAlt',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='101',
                            Protocol='6',
                            PortRange=PortRange(To='8000', From='8000'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_private_https = self.template.add_resource(
            NetworkAclEntry('InboundHTTPS',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='102',
                            Protocol='6',
                            PortRange=PortRange(To='443', From='443'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_ssh = self.template.add_resource(
            NetworkAclEntry('InboundSSH',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='103',
                            Protocol='6',
                            PortRange=PortRange(To='22', From='22'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.inbound_response = self.template.add_resource(
            NetworkAclEntry('InboundResponsePorts',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='104',
                            Protocol='6',
                            PortRange=PortRange(To='65535', From='1024'),
                            Egress='false',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.outbound_http = self.template.add_resource(
            NetworkAclEntry('OutboundHTTP',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='100',
                            Protocol='6',
                            PortRange=PortRange(To='80', From='80'),
                            Egress='true',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.outbound_https = self.template.add_resource(
            NetworkAclEntry('OutboundHTTPS',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='101',
                            Protocol='6',
                            PortRange=PortRange(To='443', From='443'),
                            Egress='true',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.outbound_response = self.template.add_resource(
            NetworkAclEntry('OutboundResponsePorts',
                            NetworkAclId=Ref(self.network_acl),
                            RuleNumber='102',
                            Protocol='6',
                            PortRange=PortRange(To='65535', From='1024'),
                            Egress='true',
                            RuleAction='allow',
                            CidrBlock='0.0.0.0/0'))

        self.subnet_network_association = self.template.add_resource(
            SubnetNetworkAclAssociation(
                'SubnetNetworkACLAssociation',
                SubnetId=Ref(self.subnet),
                NetworkAclId=Ref(self.network_acl),
                DependsOn=['TestDeploySubnet', 'NetworkAcl']))

        # --- Instance security group: SSH plus ephemeral ports in,
        #     all TCP out ---
        self.instance_security_group = self.template.add_resource(
            SecurityGroup('InstanceSecurityGroup',
                          GroupDescription='Open all ports',
                          SecurityGroupIngress=[
                              SecurityGroupRule(IpProtocol='tcp',
                                                FromPort='22',
                                                ToPort='22',
                                                CidrIp='0.0.0.0/0'),
                              SecurityGroupRule(IpProtocol='tcp',
                                                FromPort='1024',
                                                ToPort='65535',
                                                CidrIp='0.0.0.0/0')
                          ],
                          SecurityGroupEgress=[
                              SecurityGroupRule(IpProtocol='tcp',
                                                FromPort='1',
                                                ToPort='65535',
                                                CidrIp='0.0.0.0/0')
                          ],
                          VpcId=Ref(self.vpc)))

        # --- EC2 instance: bootstraps cfn-init/cfn-signal, then serves
        #     the working directory on port 8000 ---
        # NOTE(review): the UserData signals a 'WaitHandle' resource that
        # is not defined anywhere in this template -- confirm it exists
        # (or is added) before deploying.
        self.instance = self.template.add_resource(
            Instance(
                'TestDeployInstance',
                ImageId=Ref('AmiId'),
                InstanceType='t2.micro',
                KeyName=Ref('KeyName'),
                NetworkInterfaces=[
                    NetworkInterfaceProperty(
                        GroupSet=[Ref('InstanceSecurityGroup')],
                        AssociatePublicIpAddress='true',
                        DeviceIndex='0',
                        DeleteOnTermination='true',
                        SubnetId=Ref('TestDeploySubnet'))
                ],
                UserData=Base64(
                    Join('', [
                        "#!/bin/bash\n",
                        "apt-get update\n",
                        "apt-get -y install python python-pip python-setuptools\n",
                        "mkdir aws-cfn-bootstrap-latest\n",
                        "curl https://s3.amazonaws.com/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz | tar xz -C aws-cfn-bootstrap-latest --strip-components 1\n",
                        "easy_install aws-cfn-bootstrap-latest\n",
                        "/usr/local/bin/cfn-init --stack ",
                        {
                            "Ref": "AWS::StackName"
                        },
                        " --resource TestDeployInstance",
                        " --region ",
                        {
                            "Ref": "AWS::Region"
                        },
                        "\n",
                        "/usr/local/bin/cfn-signal --exit-code $? '",
                        {
                            "Ref": "WaitHandle"
                        },
                        "'\n"
                        "\n",
                        "python -m SimpleHTTPServer 8000 2>&1 >/dev/null &\n",
                    ])),
                DependsOn=['InstanceSecurityGroup', 'TestDeploySubnet'],
                Tags=Tags(Application=self.ref_stack_id)))
Exemplo n.º 31
0
def search_amazon(request, isbn):
    u""" Look up a book by ISBN via the Amazon Product Advertising API
    and render the result into register/search_result.html.

    NOTE(review): Python 2 code (urllib2); the 2010-06-01 ECS API this
    targets has been deprecated for years -- confirm before reuse.
    """
    from aws import AWS
    import xml.etree.ElementTree as ET
    import urllib2
    import logging
    from datetime import datetime, date

    # XML namespace of the 2010-06-01 AWS ECommerce Service responses.
    XMLNS = "{http://webservices.amazon.com/AWSECommerceService/2010-06-01}"

    import myconfig
    aws = AWS()

    # Build the signed ItemLookup request URL and fetch it.
    request_url = aws.doItemLookUp(isbn)
    logging.info(request_url)
    result = urllib2.urlopen(request_url)

    tree = ET.parse(result)
    root = tree.getroot()


    # Fields extracted from the response; empty string when absent.
    asin=""
    detail_page_shop_url=""
    title=""
    author=""
    publisher=""
    publication_date=""
    price=""
    image_url=""
    err_msg=""
    # Walk the response tree manually. For repeated elements the last
    # occurrence wins. NOTE(review): err_msg is collected but never
    # used below -- presumably intended for error display; confirm.
    for node in root.getchildren():
        if node.tag == XMLNS + "Items":
            for subnode in node.getchildren():
                if subnode.tag == XMLNS + "Request":
                    for subnode2 in subnode.getchildren():
                        if subnode2.tag ==  XMLNS + "Errors":
                            for errnode in subnode2.getchildren():
                                if errnode.tag == XMLNS + "Error":
                                    for msg in errnode.getchildren():
                                        if msg.tag == XMLNS + "Message":
                                            err_msg = msg.text
                if subnode.tag == XMLNS + "Item":
                    for item in subnode.getchildren():
                        if item.tag == XMLNS + "ASIN":
                            asin = item.text
                        if item.tag == XMLNS + "DetailPageURL":
                            detail_page_shop_url = item.text
                        if item.tag == XMLNS + "MediumImage":
                            for imageurl in item.getchildren():
                                if imageurl.tag == XMLNS + "URL":
                                    image_url = imageurl.text
                        if item.tag == XMLNS + "ItemAttributes":
                            for item_attr in item.getchildren():
                                if item_attr.tag == XMLNS + "Title":
                                    title = item_attr.text
                                if item_attr.tag == XMLNS + "Author":
                                    author = item_attr.text
                                if item_attr.tag == XMLNS + "ListPrice":
                                    for listprice in item_attr.getchildren():
                                        if listprice.tag == XMLNS + "Amount":
                                            price = listprice.text
                                if item_attr.tag == XMLNS + "Publisher":
                                    publisher = item_attr.text
                                if item_attr.tag == XMLNS + "PublicationDate":
                                    publication_date = item_attr.text

    # Normalise price to int; default 0 when missing/unparsable.
    try:
        price = int(price)
    except ValueError:
        price = 0
    # PublicationDate may be YYYY-MM-DD or just YYYY-MM; None otherwise.
    try:
        st = datetime.strptime(publication_date,'%Y-%m-%d')
        pub_date = date(st.year, st.month, st.day)
    except ValueError:
        try:
            st = datetime.strptime(publication_date,'%Y-%m')
            pub_date = date(st.year, st.month, st.day)
        except ValueError:
            pub_date = None

    # Assemble the (unsaved) Book model and render it into the form.
    book = Book(asin=asin, isbn=isbn,detail_page_shop_url=detail_page_shop_url,title=title,author=author,
                publisher=publisher,publication_date=pub_date,price=price,image_url=image_url, 
                tags=[], deletion_reserve=False, lending=False, version=1)
    form = BookForm(instance=book)

    return render_to_response('register/search_result.html', {'form': form.as_widget()})
Exemplo n.º 32
0
 def __init(self):
     # NOTE(review): '__init' is missing the trailing underscores, so
     # Python never calls this as the constructor -- confirm and rename
     # to '__init__'. It also passes an undefined 'verbose' and names
     # 'LoRaRcvCont' in super(); presumably copied from another class.
     super(LoRaRcvCont, self).__init__(verbose)
     self.set_mode(MODE.STDBY)
     #self.set_dio_mapping([0] * 6)
     self.db = AWS()
Exemplo n.º 33
0
 def __init__(self):
     # Data-access object and AWS client used by the rest of the class.
     self.dao = DAO()
     self.aws = AWS()
Exemplo n.º 34
0
class Resources(object):
    """Tag propagation for EC2 resources.

    Caches instances, volumes and snapshots discovered through the AWS
    client and applies tags to untagged volumes (copied from the owning
    instance) and to instances missing required tags (copied from the
    VPC or derived from another instance tag).  All configuration comes
    from environment variables, read in setup().
    """

    def __init__(self):
        # Per-resource-id caches, filled lazily by the load_info_* methods.
        self.instances = {}
        self.volumes = {}
        self.snapshots = {}
        # Tag keys that may be copied from an instance onto its volumes.
        self.filtered_tag_keys = []
        # Tag keys every instance is required to carry.
        self.require_tags_instance = []
        self.metric_prefix = 'resource_tagger_'
        self.metrics = {}
        self.aws = AWS()

        # Key whose value is used to derive missing tags, and
        # [separator, field-count] describing how to truncate that value.
        self.tag_default_copy_key = ''
        self.tag_default_copy_split = ['-', 2]

        self.setup()

    def setup(self):
        """Reset the caches and (re)read configuration from the environment."""
        self.instances.clear()
        self.volumes.clear()
        self.snapshots.clear()

        # Filters.
        # BUG FIX: these lookups used os.getenv("KEY" or default), which
        # applied `or` to the key string, so the default never took effect.
        fk = os.getenv("TAG_FILTER_KEYS_INSTANCE")
        self.filtered_tag_keys = fk.split(',') if fk is not None else []

        # Required tags for instances.
        rt = os.getenv("TAG_REQUIRED_KEYS_INSTANCE")
        self.require_tags_instance = rt.split(',') if rt is not None else []

        # Copy tag value when it's defined.
        self.tag_default_copy_key = os.getenv("TAG_DEFAULT_COPY_KEY", '')
        ts = os.getenv("TAG_DEFAULT_COPY_SPLIT")
        if ts is not None:
            self.tag_default_copy_split = ts.split(',')
        # BUG FIX: when TAG_DEFAULT_COPY_SPLIT is unset we now keep the
        # ['-', 2] default from __init__ instead of clobbering it with []
        # (which made mount_required_tags_instance raise IndexError).

        # Metrics counters.
        self.metrics = {
            "total_instances": 0,
            "total_volumes": 0,
            "total_snapshots": 0,
        }

    def load_info_instances(self):
        """Populate self.instances from the AWS inventory."""
        for instance in self.aws.get_instances():
            try:
                i = self.instances[instance["resourceId"]]
            except KeyError:
                self.instances[instance["resourceId"]] = {}
                i = self.instances[instance["resourceId"]]
                i["volumes"] = {}
                i["volumes_ebs"] = {}
            i["tags"] = copy.deepcopy(utils.tag_list_to_dict(instance["tags"]))
            try:
                i["imageId"] = instance["configuration"]["imageId"]
            except KeyError:
                # BUG FIX: was a bare `except:`; only a missing key is
                # expected and safe to ignore here.
                pass
            # Map EBS volume-id -> device name; keep non-EBS devices whole.
            for b in instance["configuration"]["blockDeviceMappings"]:
                try:
                    i["volumes_ebs"][b["ebs"]["volumeId"]] = b["deviceName"]
                except KeyError:
                    i["volumes"][b["deviceName"]] = b

    def init_info_instances(self):
        """Load instances into the cache only if it is still empty."""
        if not self.instances:
            self.load_info_instances()
        return

    def load_info_volumes(self):
        """Load all volumes that do not have tags yet."""

        self.init_info_instances()

        volumes = self.aws.get_volumes()
        for vol in volumes:
            if 'Tags' in volumes[vol]:
                # Ignore volumes that already have tags.
                # TODO: enforce default tags when they are not present.
                continue

            v = copy.deepcopy(volumes[vol])
            if vol not in self.volumes:
                self.volumes[vol] = {}

            # Remember the snapshot relationship so snapshots can later
            # receive the same tags as their volume.
            if v['SnapshotId'] != "":
                self.volumes[vol]["snapshoot"] = v['SnapshotId']
                try:
                    s = self.snapshots[v['SnapshotId']]
                except KeyError:
                    self.snapshots[v['SnapshotId']] = {}
                    s = self.snapshots[v['SnapshotId']]
                s["volume_id"] = vol

            if len(v["Attachments"]) > 0:
                for a in v["Attachments"]:
                    try:
                        cur_tags = self.tag_filter(
                            self.instances[a['InstanceId']]["tags"])
                    except KeyError:
                        cur_tags = {"error": "Instance tag's is NotFound."}
                    self.volumes[vol]["instance_attached"] = a['InstanceId']
                    self.volumes[vol]["attached"] = "yes"
                    new_tags = copy.deepcopy(cur_tags)
                    try:
                        device = self.instances[
                            a['InstanceId']]["volumes_ebs"][vol]
                    except KeyError:
                        device = ''
                    # Suffix the Name tag with the device name so volumes
                    # of the same instance stay distinguishable.
                    if 'Name' in new_tags:
                        new_tags["Name"] += " " + device
                    self.volumes[vol]["tags"] = new_tags
            else:
                self.volumes[vol]["attached"] = "no"

        return

    def show_report(self):
        """Print a summary; dump the full caches only in DEBUG log level."""
        if LOG_LEVEL == "DEBUG":
            print(">> EC2 Instances: ")
            pprint(self.instances)

            print(">> Snapshoots: ")
            pprint(self.snapshots)

            print(">> Untagged volumes: ")
            pprint(self.volumes)

        msg = ("Total untagged volumes: {}".format(len(self.volumes)))
        print(msg)
        msg = ("Total snapshoots to tag: {}".format(len(self.snapshots)))
        print(msg)

    def add_metrics(self):
        """Publish the count of untagged volumes as a CloudWatch-style metric."""
        self.aws.add_metrics(
            data={
                "name": "total_untagged_resources",
                "value": len(self.volumes),
                "dimensions": {
                    "Name": "resource",
                    "Value": "volumes"
                }
            })

    def tag_filter(self, tags):
        """Return only the entries of `tags` whose keys are allowed by
        self.filtered_tag_keys."""
        return {k: v for k, v in tags.items() if k in self.filtered_tag_keys}

    def apply_tags_volumes(self):
        """Apply instance-derived tags to every eligible untagged volume."""

        self.load_info_volumes()

        messages = []
        if len(self.volumes) <= 0:
            return
        for vol in self.volumes.keys():
            volume = self.volumes[vol]

            # Skip volumes that are not attached to an instance.
            try:
                if volume["attached"] != "yes":
                    messages.append(
                        "ignoring volume {}=unattached to instance".format(
                            vol))
                    continue
            except KeyError:
                pass
            # Skip volumes with no usable tags or whose instance lookup
            # failed earlier (marked with an "error" tag).
            try:
                if len(volume["tags"]) <= 0:
                    messages.append(
                        "ignoring volume {}=empty tags".format(vol))
                    continue
                if 'error' in volume["tags"]:
                    messages.append("ignoring volume {}={}".format(
                        vol, str(volume["tags"])))
                    continue
            except KeyError as e:
                messages.append("ignoring volume {}=KeyError {}".format(
                    vol, e))
                continue

            msg = ("{}={}".format(vol, str(volume["tags"])))
            logging.info(msg)
            messages.append(msg)
            self.aws.clients["ec2"].create_tags(Resources=[vol],
                                                Tags=utils.tag_dict_to_list(
                                                    volume["tags"]))
        print("Tags applied to volumes: {}".format(json.dumps(messages)))
        return

    def apply_tags_snapshots(self):
        """TODO: use the same tags as the originating volumes."""
        return

    def apply_tags_instances(self):
        """ TODO:
        - use global/default tags.
        - Overwrite existing?
        - Useful to enforce to whole account; but in shared Account?
        - how to distinguish between two resources?
        """
        return

    def apply_tags_ec2_resource(self, resource_id, tags):
        """Create the given tags (AWS list-of-dicts form) on one EC2 resource."""
        return self.aws.clients["ec2"].create_tags(Resources=[resource_id],
                                                   Tags=tags)

    def apply_tags_images(self):
        """TODO: what tags to use? Globals?"""
        return

    def apply_tags_from_event(self, event):
        """Dispatch a CloudWatch-style event to the right tagging handler."""
        if 'instance-id' in event["detail"]:
            return self.process_event_instance(event)

        elif 'event' in event["detail"]:
            if event["detail"]["event"] == "createVolume":
                return self.process_event_volume(event)

        return {"error": "event not found", "event": event}

    def process_event_instance(self, event):
        """Ensure an instance (and its devices) carries all required tags.

        Missing tags are filled from the VPC's tags, or derived from
        TAG_DEFAULT_COPY_KEY when the VPC does not carry them either.
        """
        instance_id = event["detail"]["instance-id"]

        print("Processing InstanceID: {}".format(instance_id))
        instance = self.aws.get_instance_tags_api(instance_id)

        if not instance:
            print("ERR - No Tags or resource found for ID: {}".format(
                instance_id))
            return

        instance_tags = utils.tag_list_to_dict(instance["Tags"])

        missing_keys = self.check_required_tags(instance_tags)
        if len(missing_keys) <= 0:
            print(
                "That's OK, all required Tags was defined to resource_id: {}".
                format(instance_id))
            # Instance is fine; still make sure its block devices are tagged.
            try:
                self.check_tags_instance_dm(instance["BlockDeviceMappings"],
                                            instance_tags)
            except Exception as e:
                print(
                    "Error - Tagging process was not complete in instance devices: {}"
                    .format(e))
                pass
            return

        try:
            vpc_id = instance["VpcId"]
        except KeyError:
            # BUG FIX: was a bare `except:`.
            vpc_id = ''

        vpc = self.aws.get_vpc_tags(vpc_id)
        if len(vpc) <= 0:
            print("ERR - No VPC [{}] found, skipping tagger".format(vpc_id))
            return

        try:
            vpc_tags = utils.tag_list_to_dict(vpc[0]["tags"])
        except KeyError:
            print(
                "ERR - No VPC [{}] tags found, skipping tagger".format(vpc_id))
            return
        except:
            raise

        # Discover and fill tags to apply based on the required (missing) keys.
        tags_to_apply = self.mount_required_tags_instance(
            missing_keys=missing_keys,
            vpc_tags=vpc_tags,
            instance_tags=instance_tags)

        msg = {
            "msg": "Applying tags to Instance",
            "tags_to_apply": tags_to_apply,
            "resource_id": instance_id,
            "current_resource_tags": instance_tags,
            "current_vpc_tags": vpc_tags,
            "found_missing_keys": missing_keys
        }
        print(msg)
        self.apply_tags_ec2_resource(
            resource_id=instance["InstanceId"],
            tags=utils.tag_dict_to_list(tags_to_apply))

        if 'BlockDeviceMappings' not in instance:
            return

        # Propagate the merged tag set down to the instance's devices.
        return self.check_tags_instance_dm(instance["BlockDeviceMappings"], {
            **instance_tags,
            **tags_to_apply
        })

    def check_required_tags(self, instance_tags):
        """Return the required tag keys that are missing from instance_tags."""
        return [k for k in self.require_tags_instance
                if k not in instance_tags]

    def mount_required_tags_instance(self, missing_keys, vpc_tags,
                                     instance_tags):
        """
        Check the missing keys and mount the tags to apply based on
        filters, VPC and Instance tags.

        Precedence per key: VPC tag value first; otherwise derive it from
        the instance tag named by tag_default_copy_key, truncated to the
        first N separator-joined fields per tag_default_copy_split.
        """
        tags_to_apply = {}

        for k in missing_keys:
            try:
                tags_to_apply[k] = vpc_tags[k]
            except KeyError:
                try:
                    if self.tag_default_copy_key not in instance_tags:
                        continue

                    copy_tag = instance_tags[self.tag_default_copy_key]

                    tag_value = ''
                    cnt = 0
                    sep = self.tag_default_copy_split[0]
                    offset = int(self.tag_default_copy_split[1])
                    for v in copy_tag.split(sep):
                        tag_value += v
                        cnt += 1
                        if cnt >= offset:
                            break
                        tag_value += '-'

                    tags_to_apply[k] = tag_value
                except Exception as e:
                    print("Unexpected error: ", e)
                    raise

        return tags_to_apply

    def mount_required_tags_volume(self, missing_keys, instance_tags,
                                   volume_tags):
        """
        Check the missing keys and mount the tags to apply based on
        Required tags (Env var), Instance Tags and Current Volume tags.

        Each missing key takes the instance's value, or the literal
        'missing-value' placeholder when the instance lacks it too.
        """
        return {k: instance_tags.get(k, 'missing-value')
                for k in missing_keys}

    def check_tags_instance_dm(self, devices, itags):
        """Apply tags to Instance Devices (EBS volumes in the device map)."""
        for device in devices:
            try:
                ebs_map = device["Ebs"]
            except KeyError:
                print(
                    "Error - 'Ebs' info is not found on device map: {}".format(
                        device))
                continue
            except Exception as e:
                print("Error - Ebs_map on check_tags_instance_dm(): {}".format(
                    e))
                continue

            try:
                self.apply_tags_volume(ebs_map["VolumeId"], itags,
                                       device["DeviceName"])
            except Exception as e:
                print(
                    "Unkwnown error on check_tags_instance_dm(): {}".format(e))
                pass

    def apply_tags_volume(self, volume_id, itags, device_name):
        """Fill a volume's missing required tags from its instance's tags."""
        if not itags:
            print("ignoring volume {}=empty tags".format(volume_id))
            return

        volume = self.aws.get_volume_tags_api(volume_id)
        if not volume:
            # BUG FIX: this message referenced the undefined name
            # `instance_id`, raising NameError on the error path.
            print("ERR - No Tags or resource found for ID: {}".format(
                volume_id))
            return

        volume_tags = utils.tag_list_to_dict(volume["Tags"])

        missing_keys = self.check_required_tags(volume_tags)
        if len(missing_keys) <= 0:
            print(
                "That's OK, all required Tags was defined to resource_id: {}".
                format(volume_id))
            return

        # TODO double check: instance_id (owner of itags) is the same of Attachments.InstanceId

        tags_to_apply = self.mount_required_tags_volume(
            missing_keys=missing_keys,
            instance_tags=itags,
            volume_tags=volume_tags)

        # NOTE(review): when tags_to_apply lacks Name, itags["Name"] is
        # assumed to exist; a KeyError here is swallowed by the caller.
        if 'Name' in tags_to_apply:
            tags_to_apply["Name"] += " " + device_name
        else:
            tags_to_apply["Name"] = "{} {}".format(itags["Name"], device_name)

        msg = {
            "msg": "Applying tags to Volume",
            "tags_to_apply": tags_to_apply,
            "resource_id": volume_id,
            "current_resource_tags": volume_tags,
            "current_instance_tags": itags,
            "found_missing_keys": missing_keys
        }
        print(msg)

        return self.apply_tags_ec2_resource(
            resource_id=volume_id, tags=utils.tag_dict_to_list(tags_to_apply))
Exemplo n.º 35
0
 def __init__(self, auth=None, region=None):
     """Initialise the AWS base class with optional credentials and region.

     Explicit base-class call (rather than super()) preserved; the
     enclosing class is presumably an AWS subclass -- confirm.
     """
     AWS.__init__(self, auth, region)
Exemplo n.º 36
0
class InstanceInfo:
    def __init__(self, provider="other"):
        self.logger = logging.getLogger("DNSWatch.InstanceInfo")
        self.provider = provider
        if provider == "gce":
            self.cloud = GCE()
        elif provider == "aws":
            self.cloud = AWS()

    def get_fqdn(self):
        """Return this host's fully-qualified domain name."""
        fqdn = socket.getfqdn()
        return fqdn

    def get_hostname(self):
        """Return the host name truncated at the first dot, or None."""
        name = socket.gethostname()
        if not name:
            return None
        # str.split never fails here, so no exception guard is needed.
        return name.split(".")[0]

    def get_private_ip(self):
        """Return an IP address of the interface used for external traffic.

        Cloud providers are asked via their metadata client; everything
        else falls back to local socket/interface inspection.
        """
        self.logger.debug("Detecting private IP.")
        if self.provider in ("aws", "gce"):
            ip = self._get_private_ip_cloud()
        else:
            ip = self._get_private_ip_other()
        self.logger.debug("My private IP: {}.".format(ip))
        return ip

    def get_public_ip(self):
        """Return this host's public IP, provider metadata first."""
        self.logger.debug("Detecting public IP.")
        if self.provider in ("aws", "gce"):
            ip = self._get_public_ip_cloud()
        else:
            ip = self._get_public_ip_other()
        self.logger.debug("My public IP: {}.".format(ip))
        return ip

    def _get_private_ip_other(self):
        """Best-effort private-IP discovery without cloud metadata.

        With multiple NICs, "connect" a UDP socket towards a public
        resolver (no packet is actually sent for a UDP connect) and read
        the local address the kernel chose.  With a single NIC, query its
        address directly through the SIOCGIFADDR ioctl.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        try:
            interfaces = self._get_interfaces()

            if len(interfaces) > 1:
                self.logger.debug(
                    "More than one interface found, using external "\
                        "connect to find proper IP.")
                # First method
                s.connect(("8.8.8.8", 53))
                ip = s.getsockname()[0]
            else:
                interface = interfaces[0]

                # Second method
                # BUG FIX: struct.pack('256s', ...) requires bytes on
                # Python 3; the interface name must be encoded.
                ip = socket.inet_ntoa(fcntl.ioctl(
                    s.fileno(),
                    0x8915,  # SIOCGIFADDR
                    struct.pack('256s', interface.encode()))[20:24])
        finally:
            # BUG FIX: the socket used to leak when connect()/ioctl raised.
            s.close()
        return ip

    def _get_private_ip_cloud(self):
        # Delegate to the provider-specific client (GCE or AWS) set in
        # __init__; only called when provider is "aws" or "gce".
        return self.cloud.get_private_ip()

    def _get_public_ip_other(self):
        """Resolve this host's public IP via reverse then forward DNS.

        Returns the first A record for the reverse-resolved name, or
        None (with an error log) when any step fails.
        """
        ip = None

        try:
            # NOTE(review): relies on self.private_ip being set elsewhere;
            # no method visible here assigns it - confirm against callers.
            name = socket.gethostbyaddr(self.private_ip)[0]
            ip = self._query(name, "A", ["8.8.8.8", "8.8.4.4"])[0]
        except Exception:
            # BUG FIX: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt.
            self.logger.error("Failed to find public IP.")
        return ip

    def _get_public_ip_cloud(self):
        # Delegate to the provider-specific client (GCE or AWS) set in
        # __init__; only called when provider is "aws" or "gce".
        return self.cloud.get_public_ip()

    def _get_interfaces(self):
        self.logger.debug("Getting network interfaces.")
        interfaces = list()
        with open("/proc/net/dev", "r") as dev_file:
            devices = dev_file.readlines()
            for dev in devices[2:]:
                dev_name = dev.split(":")[0].strip()
                if dev_name != "lo":
                    interfaces.append(dev_name)
        self.logger.debug("Interfaces: {}.".format(interfaces))
        return interfaces