Example #1
File: discover.py  Project: ambrt/straw
 def find_first_service(s):
     '''find the FIRST listed service with tag post-fix s
        NOTE: We implicitly assume that the list of AWS services is fixed.
        We should fix that by identifying the leader nodes among each service
        type.
     '''
     for v in self.services:
         if v.tags[0]['Value'] == get_tag(s):
             return v.private_ip_address
Example #2
 def find_first_service(s):
     '''find the FIRST listed service with tag post-fix s
        NOTE: We implicitly assume that the list of AWS services is fixed.
        We should fix that by identifying the leader nodes among each service
        type.
     '''
     for v in self.services:
         if v.tags[0]['Value'] == get_tag(s):
             return (v.private_ip_address)
Example #3
 def __init__(self):
     ec2 = boto3.resource('ec2')
     client = boto3.client('ec2')
     filt = [{
         'Name': 'instance-state-name',
         'Values': ['running']
     }, {
         'Name': 'tag-value',
         'Values': [get_tag(s + '-node') for s in services]
     }]
     self.services = []
     for v in ec2.instances.filter(Filters=filt):
         self.services.append(v)
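Note that the __init__ above relies on a module-level services list and a get_tag helper, which these examples import from create_clusters but never show. A minimal self-contained sketch of those assumptions follows; the tag scheme and service names here are hypothetical, not taken from the project:

import boto3

# Hypothetical stand-ins for the project's real definitions.
services = ['elasticsearch', 'kafka', 'flask', 'storm']

def get_tag(name):
    # Assumption: tags are simply namespaced with a project prefix;
    # the real create_clusters.get_tag may do something different.
    return 'straw-' + name

# Same filter shape as in the snippet above: running instances whose
# tag value matches any of the per-service node tags.
filt = [{'Name': 'instance-state-name', 'Values': ['running']},
        {'Name': 'tag-value', 'Values': [get_tag(s + '-node') for s in services]}]

ec2 = boto3.resource('ec2')
for v in ec2.instances.filter(Filters=filt):
    print(v.instance_id, v.private_ip_address)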
Example #4
    def make_config_file(self, filename):
        '''create a straw config file for AWS'''
        def find_first_service(s):
            '''find the FIRST listed service with tag post-fix s
               NOTE: We implicitly assume that the list of AWS services is fixed.
               We should fix that by identifying the leader nodes among each service
               type.
            '''
            for v in self.services:
                if v.tags[0]['Value'] == get_tag(s):
                    return (v.private_ip_address)

        with open(filename, "w") as f:
            header = """#
# config for straw.storm application
#

"""
            f.write(header)
            elasticsearch = """
# elasticsearch settings
elasticsearch_host={0}
elasticsearch_port=9300
elasticsearch_cluster_name={1}
index_name=documents
document_type=document
""".format(find_first_service("elasticsearch-node"),
            get_tag("elasticsearch-cluster"))
            f.write(elasticsearch)

            kafka = """
# kafka settings
zookeeper_host={0}
zookeeper_port=2181
kafka_query_topic=queries
kafka_document_topic=documents
""".format(find_first_service("kafka-node"))
            f.write(kafka)

            redis = """
# redis
redis_host={0}
redis_port=6379
""".format(find_first_service("flask-node"))
            f.write(redis)
        print("Wrote config file {0}.".format(f.name))
Example #5
File: discover.py  Project: ambrt/straw
    def make_config_file(self, filename):
        '''create a straw config file for AWS'''

        def find_first_service(s):
            '''find the FIRST listed service with tag post-fix s
               NOTE: We implicitly assume that the list of AWS services is fixed.
               We should fix that by identifying the leader nodes among each service
               type.
            '''
            for v in self.services:
                if v.tags[0]['Value'] == get_tag(s):
                    return v.private_ip_address

        with open(filename,"w") as f:
            header = """#
# config for straw.storm application
#

"""
            f.write(header)
            elasticsearch = """
# elasticsearch settings
elasticsearch_host={0}
elasticsearch_port=9300
elasticsearch_cluster_name={1}
index_name=documents
document_type=document
""".format(find_first_service("elasticsearch-node"), get_tag("elasticsearch-cluster"))
            f.write(elasticsearch)

            kafka = """
# kafka settings
zookeeper_host={0}
zookeeper_port=2181
kafka_query_topic=queries
kafka_document_topic=documents
""".format(find_first_service("kafka-node"))
            f.write(kafka)

            redis = """
# redis
redis_host={0}
redis_port=6379
""".format(find_first_service("flask-node"))
            f.write(redis)
        print("Wrote config file {0}.".format(f.name))
Example #6
#   Configure Storm on ec2 instances
#
import boto3, os, argparse, sys
sys.path.append("..")
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
from create_clusters import get_tag, keyfile
from config_utils import quiet_wrap

# configuration
my_instances_filters = [{
    'Name': 'instance-state-name',
    'Values': ['running']
}, {
    'Name': 'tag-value',
    'Values': [get_tag('storm-node')]
}]

if __name__ == "__main__":

    # argument help
    parser = argparse.ArgumentParser(
        description='Configure the storm cluster.')
    parser.add_argument('--elasticsearch',
                        help='Collocate elasticsearch with Storm cluster.',
                        action='store_true')
    args = parser.parse_args()

    # find all the host nodes
    ec2 = boto3.resource('ec2')
    hosts = []
Example #7
#!/usr/bin/python3
#
#   Configure Elasticsearch on ec2 instances
#
import boto3, os, sys
sys.path.append("..")
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
from create_clusters import get_tag, keyfile

# configuration
my_instances_filters = [
    {'Name': 'instance-state-name', 'Values': ['running']},
    {'Name': 'tag-value', 'Values': [get_tag('elasticsearch-node')]},
]

if __name__ == "__main__":
    
    # find all the host nodes
    ec2 = boto3.resource('ec2')
    hosts = []
    private_ips = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)

    if len(hosts) != len(private_ips):
        raise(RuntimeError("Host and private ips not consistent!"))

    if len(hosts) == 0:
        raise(RuntimeError("No hosts found."))
Example #8
#!/usr/bin/python3
#
#   Configure Flask on ec2 instances
#
import boto3, os, sys

sys.path.append("..")
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
from create_clusters import get_tag, keyfile
from config_utils import quiet_wrap

# configuration
my_instances_filters = [
    {"Name": "instance-state-name", "Values": ["running"]},
    {"Name": "tag-value", "Values": [get_tag("flask-node")]},
]

if __name__ == "__main__":

    # find all the host nodes
    ec2 = boto3.resource("ec2")
    hosts = []
    private_ips = []
    public_dns = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)
        public_dns.append(instance.public_dns_name)
Example #9
#!/usr/bin/python3
#
#   Configure Elasticsearch on ec2 instances
#
import boto3, os, sys

sys.path.append("..")
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
from create_clusters import get_tag, keyfile

# configuration
my_instances_filters = [
    {"Name": "instance-state-name", "Values": ["running"]},
    {"Name": "tag-value", "Values": [get_tag("elasticsearch-node")]},
]

if __name__ == "__main__":

    # find all the host nodes
    ec2 = boto3.resource("ec2")
    hosts = []
    private_ips = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)

    if len(hosts) != len(private_ips):
        raise RuntimeError("Host and private ips not consistent!")
Example #10
#!/usr/bin/python3
#
#   Configure Spark on ec2 instances
#

import boto3, os, sys
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
sys.path.append("..")
from create_clusters import get_tag, keyfile

# configuration
my_instances_filters = [
    {'Name': 'instance-state-name', 'Values': ['running']},
    {'Name': 'tag-value', 'Values': [get_tag('spark-node')]},
]

if __name__ == "__main__":
    
    # find all the host nodes
    ec2 = boto3.resource('ec2')
    hosts = []
    private_ips = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)

    if len(hosts) != len(private_ips):
        raise(RuntimeError("Host and private ips not consistent!"))

    if len(hosts) == 0:
        raise(RuntimeError("No hosts found."))
Example #11
File: discover.py  Project: ambrt/straw
 def __init__(self):
     ec2 = boto3.resource('ec2')
     client = boto3.client('ec2')
     filt = [{'Name': 'instance-state-name', 'Values': ['running']},
             {'Name': 'tag-value', 'Values': [get_tag(s + '-node') for s in services]}]
     self.services = []
     for v in ec2.instances.filter(Filters=filt):
         self.services.append(v)
Example #12
#!/usr/bin/python3
#
#   Configure Storm on ec2 instances
#
import boto3, os, argparse, sys
sys.path.append("..")
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
from create_clusters import get_tag, keyfile
from config_utils import quiet_wrap

# configuration
my_instances_filters = [
    {'Name': 'instance-state-name', 'Values': ['running']},
    {'Name': 'tag-value', 'Values': [get_tag('storm-node')]},
]

if __name__ == "__main__":
    
    # argument help
    parser = argparse.ArgumentParser(description='Configure the storm cluster.')
    parser.add_argument('--elasticsearch', help='Collocate elasticsearch with Storm cluster.', action='store_true')
    args = parser.parse_args()

    # find all the host nodes
    ec2 = boto3.resource('ec2')
    hosts = []
    private_ips = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)
Example #13
#!/usr/bin/python3
#
#   Configure Flask on ec2 instances
#
import boto3, os, sys
sys.path.append("..")
from botocore.exceptions import ClientError as BotoClientError
from time import sleep
from create_clusters import get_tag, keyfile
from config_utils import quiet_wrap

# configuration
my_instances_filters = [
    {'Name': 'instance-state-name', 'Values': ['running']},
    {'Name': 'tag-value', 'Values': [get_tag('flask-node')]},
]

if __name__ == "__main__":
    
    # find all the host nodes
    ec2 = boto3.resource('ec2')
    hosts = []
    private_ips = []
    public_dns = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)
        public_dns.append(instance.public_dns_name)

    if len(hosts) != len(private_ips):
        raise(RuntimeError("Host and private ips not consistent!"))
Example #14
#!/usr/bin/python3
#
#   Configure Kafka on ec2 instances
#

import boto3, os, sys
from botocore.exceptions import ClientError as BotoClientError
from time import sleep

sys.path.append("..")
from create_clusters import get_tag, keyfile

# configuration
my_instances_filters = [
    {"Name": "instance-state-name", "Values": ["running"]},
    {"Name": "tag-value", "Values": [get_tag("kafka-node")]},
]

if __name__ == "__main__":

    # find all the host nodes
    ec2 = boto3.resource("ec2")
    hosts = []
    private_ips = []
    reservations = ec2.instances.filter(Filters=my_instances_filters)
    for instance in reservations:
        print("ID: {0:<15}\tIP: {1:<15}".format(instance.instance_id, instance.public_ip_address))
        hosts.append(instance.public_ip_address)
        private_ips.append(instance.private_ip_address)

    if len(hosts) != len(private_ips):