Example #1
def main():

    uid = guid()  # unique user id

    # create flask session with uid

    return redirect("/rider/{}".format(uid))
    def create_ec2(self):
        instance = ec2.Instance(self,
                                guid('EC2-'),
                                vpc=self.vpc,
                                instance_type=self.DEFAULT_EC2_TYPE,
                                machine_image=self.DEFAULT_IMAGE)

        return instance
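Every snippet in this listing calls guid() without showing its definition. A minimal sketch of what such a helper could look like, assuming it only needs to produce a unique, optionally prefixed identifier (the uuid4-based implementation is an assumption, not the original code):

import uuid


def guid(prefix=''):
    # Assumed helper: return a unique id, optionally prefixed (e.g. 'EC2-').
    return '{}{}'.format(prefix, uuid.uuid4().hex)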
Example #3
def loginDriver():
    # login driver
    # create session
    # insert to drivers topics

    duid = guid()
    # redirect to /driver/<driver_id> with uuid
    return redirect("/driver/{}".format(duid))
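Both route handlers leave the session step as a comment. A minimal sketch of storing the generated id in a standard Flask session (the route, function name, secret key, and session field name are assumptions):

from flask import Flask, redirect, session

app = Flask(__name__)
app.secret_key = 'change-me'  # assumption: any non-empty secret enables sessions


@app.route('/login')
def login_rider():
    uid = guid()
    session['uid'] = uid  # keep the generated id in the Flask session
    return redirect('/rider/{}'.format(uid))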
    def create_elasticache(self):
        cache = elasticache.CfnCacheCluster(
            self,
            guid('ELASTICACHE-'),
            cache_node_type="cache.t3.micro",
            engine="redis",
            num_cache_nodes=1,
            port=6379,
            az_mode="cross-az",
            vpc_security_group_ids=['vpc-1657f27d'])
        return cache
    def create_elb(self):
        lb = elb.LoadBalancer(self,
                              guid('ELB-'),
                              vpc=self.vpc,
                              cross_zone=True,
                              internet_facing=True,
                              health_check=elb.HealthCheck(port=80))

        lb.add_target(self.auto_scaling_group)
        lb.add_listener(external_port=80)

        return lb
    def create_auto_scaling_group(self):
        auto_scaling_group = autoscaling.AutoScalingGroup(
            self,
            guid('ASG-'),
            vpc=self.vpc,
            instance_type=self.DEFAULT_EC2_TYPE,
            machine_image=self.DEFAULT_IMAGE,
            min_capacity=1,
            max_capacity=5)
        auto_scaling_group.scale_on_cpu_utilization(
            "keepCpuUtilization", target_utilization_percent=10)
        return auto_scaling_group
Example #7
def driver(driver_id=None):

    if driver_id is None:  # generate driver's ID
        driver_id = guid()

    # collect messages consumed from the 'routes' topic for this driver's ID
    # into a thread-safe structure
    msgList = []
    for msg in consumer:
        msgList.append(msg)

    return render_template('driver.html',
                           driver_id=driver_id,
                           uuid=driver_id,
                           response=''.join(msgList))
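The driver view only hints at the consumer it reads from. A minimal sketch using kafka-python, assuming a 'routes' topic keyed by driver id (the topic name, broker address, and key-based filtering are assumptions):

from kafka import KafkaConsumer

consumer = KafkaConsumer(
    'routes',                              # assumed topic name
    bootstrap_servers='localhost:9092',    # assumed broker address
    value_deserializer=lambda v: v.decode('utf-8'),
    consumer_timeout_ms=1000)              # stop iterating instead of blocking forever


def messages_for(driver_id):
    # Keep only messages whose key matches the driver's id.
    return [msg.value for msg in consumer
            if msg.key and msg.key.decode('utf-8') == driver_id]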
    def create_rds(self):
        my_rds = rds.DatabaseInstance(
            self,
            guid('RDS-'),
            master_username="******",
            master_user_password=core.SecretValue.plain_text("password"),
            database_name="db1",
            engine=rds.DatabaseInstanceEngine.MYSQL,
            vpc=self.vpc,
            port=3306,
            instance_type=self.DEFAULT_EC2_TYPE,
            removal_policy=core.RemovalPolicy.DESTROY,
            deletion_protection=False,
            multi_az=True,
            max_allocated_storage=1000)
        return my_rds
    def create_key(self):
        """
        Creates an access key for the test user.
        :return: The created access key's secret.
        """
        try:
            key_pair = iam.CfnAccessKey(self,
                                        guid('IMA_KEY-'),
                                        user_name='test_user')
            logger.info("Created access key pair for user %s.",
                        key_pair.user_name)
        except Exception:
            logger.exception("Couldn't create access key pair for test user")
            raise
        else:
            return key_pair.get_att('attr_secret_access_key')
    def create_backup_function(self):
        bucket = s3.Bucket.from_bucket_arn(
            self,
            guid('LAMBDA-SYNC-'),
            bucket_arn="arn:aws:s3:::interview-lambda")
        fn = _lambda.Function(self,
                              "BackupHandler",
                              runtime=_lambda.Runtime.PYTHON_3_8,
                              handler="lambda_function.lambda_handler",
                              code=_lambda.Code.from_bucket(
                                  bucket, 'backup.zip'))

        apigw.LambdaRestApi(
            self,
            'backup',
            handler=fn,
        )
        return fn
    def create_sync_function(self):
        bucket = s3.Bucket.from_bucket_arn(
            self,
            guid('LAMBDA-BACKUP-'),
            bucket_arn="arn:aws:s3:::interview-lambda")
        fn = _lambda.Function(self,
                              "SyncHandler",
                              runtime=_lambda.Runtime.PYTHON_3_8,
                              handler="lambda_function.lambda_handler",
                              code=_lambda.Code.from_bucket(
                                  bucket, 'sync.zip'))
        bucket.grant_read_write(fn)
        apigw.LambdaRestApi(
            self,
            'sync',
            handler=fn,
        )
        return fn
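Both Lambda functions point at handler="lambda_function.lambda_handler", whose code lives in the referenced zip archives and is not shown in this listing. A minimal placeholder sketch of what such a handler could look like (purely an assumption about the archive contents):

# lambda_function.py (assumed contents of sync.zip / backup.zip)
import json


def lambda_handler(event, context):
    # Placeholder: echo the incoming API Gateway request path.
    return {
        'statusCode': 200,
        'body': json.dumps({'received': event.get('path', '/')})
    }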
Example #12
def handle(iface, team_id, pkt):
    try:
        if not check_sign(pkt['data'], pkt['sign']): return
    except:
        return

    if pkt['cmd'] == 0:
        try:
            data = int(pkt['data']).to_bytes(300,
                                             byteorder='big').lstrip(b'\0')
            if b"start:" in data and pkt['stream'] not in current_streams:
                try:
                    current_streams[pkt['stream']] = 1
                    B, dh_key = downhill(*data.split(b':')[1:4])
                except:
                    return
                else:
                    if pkt['stream'] not in dh_keys:
                        dh_keys[pkt['stream']] = dh_key
                    send(iface, team_id, 1, b'pub:%d' % B, pkt['stream'])
                    return
        except Exception as e:
            logger.error(e)
            return

    elif pkt['cmd'] == 2 and pkt['stream'] in dh_keys:
        dh_key = dh_keys.get(pkt['stream'], 1)
        try:
            data = (int(int(pkt['data']) // dh_key)).to_bytes(
                300, byteorder='big').lstrip(b'\0')
            if b"put:" in data:
                sens_data = data.split(b'put:')[1]
                sensor = guid()
                del dh_keys[pkt['stream']]
                with open(sensors.value + sensor, 'wb') as s:
                    s.write(sens_data)
                logger.warning("new data in %s" % sensor)
                send(iface, team_id, 3, b'ACCEPT:%s' % sensor.encode("utf8"),
                     pkt['stream'], dh_key)
                del current_streams[pkt['stream']]
                return
        except Exception as e:
            logger.error(e)
            pass
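check_sign(), downhill() and send() come from the surrounding service and are not shown here. For orientation only, a sketch of a signature check built on HMAC; the real service may use a different scheme entirely (the key handling and encoding are assumptions):

import hashlib
import hmac

SECRET_KEY = b'shared-secret'  # assumption: a pre-shared key


def check_sign(data, sign):
    # Assumed scheme: hex-encoded HMAC-SHA256 over the raw data field.
    expected = hmac.new(SECRET_KEY, str(data).encode('utf-8'),
                        hashlib.sha256).hexdigest()
    return hmac.compare_digest(expected, sign)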
    def __init__(self,
                 scope: core.Construct,
                 id: str,
                 vpc_id: str = guid('VPC-'),
                 **kwargs) -> None:
        super().__init__(scope, id, **kwargs)
        # self.iam_key_id = self.create_key().to_string()
        self.vpc: ec2.Vpc = ec2.Vpc(self, vpc_id, max_azs=2)
        self.auto_scaling_group: autoscaling.AutoScalingGroup = (
            self.create_auto_scaling_group())
        self.elb: elb.LoadBalancer = self.create_elb()
        self.rds: rds.DatabaseInstance = self.create_rds()
        self.cloudfront: cloudfront.Distribution = self.create_cloud_front()
        self.archive_storage: s3.Bucket = self.create_s3(id='archive_storage',
                                                         versioned=True)
        self.ec2: ec2.Instance = self.create_ec2()
        # self.cache: elasticache.CfnCacheCluster = self.create_elasticache()
        self.create_sync_function()
        self.create_backup_function()
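The constructor above belongs to a CDK stack class that wraps the create_* methods shown throughout this listing. A minimal sketch of the app entry point that would synthesize it, assuming CDK v1 naming and a stack class called InterviewStack (the class and file names are assumptions):

# app.py (assumed entry point)
from aws_cdk import core

app = core.App()
InterviewStack(app, 'interview-stack')  # the stack class wrapping the methods above
app.synth()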
Example #14
    def create_s3(self,
                  id: str = guid('S3-'),
                  versioned: bool = False,
                  type: str = 's3'):
        """
        Creates an S3 bucket.
        :param id:
        :param versioned:
        :param type: s3 / Glacier / Glacier Deep Archive
        :return: The bucket.
        """
        bucket = s3.Bucket(self,
                           id,
                           versioned=versioned,
                           cors=[
                               s3.CorsRule(
                                   allowed_methods=[s3.HttpMethods.GET],
                                   allowed_origins=['*'])
                           ])
        return bucket
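The type parameter is accepted but never used in the body above. If Glacier storage is wanted, one way to honor it is a lifecycle rule that transitions objects to Glacier, placed just before the return; this is a sketch of an assumption, not part of the original code:

if type.lower() == 'glacier':  # hypothetical handling of the unused parameter
    bucket.add_lifecycle_rule(
        transitions=[
            s3.Transition(storage_class=s3.StorageClass.GLACIER,
                          transition_after=core.Duration.days(90))
        ])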