def generate_json(
        width, w_projection_planes, bucket, nodes, volume, shutdown,
        min_frequency, max_frequency, scan_statistics):
    work_to_do = WorkToDo(
        width=width,
        bucket_name=bucket,
        s3_uvsub_name=get_s3_uvsub_name(width),
        s3_split_name=get_s3_split_name(width),
        min_frequency=min_frequency,
        max_frequency=max_frequency)
    work_to_do.calculate_work_to_do()

    node_details = {
        'i2.2xlarge': [{'ip_address': 'node_i2_{0}'.format(i)} for i in range(0, nodes)],
    }
    graph = BuildGraphUvsub(
        work_to_do=work_to_do.work_to_do,
        bucket_name=bucket,
        volume=volume,
        parallel_streams=PARALLEL_STREAMS,
        node_details=node_details,
        shutdown=shutdown,
        scan_statistics=scan_statistics,
        width=width,
        w_projection_planes=w_projection_planes,
        session_id='session_id',
        dim_ip='1.2.3.4')
    graph.build_graph()

    json_dumps = json.dumps(graph.drop_list, indent=2)
    LOG.info(json_dumps)
    with open("/tmp/json_uvsub.txt", "w") as json_file:
        json_file.write(json_dumps)
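# Illustrative usage sketch (an assumption, not part of the pipeline): dump the
# uvsub physical graph to /tmp/json_uvsub.txt without provisioning any AWS
# resources. All argument values below are hypothetical placeholders.
#
#   generate_json(
#       width=4,
#       w_projection_planes=24,
#       bucket='my-chiles02-bucket',
#       nodes=8,
#       volume='/mnt/daliuge/dlg_root',
#       shutdown=True,
#       min_frequency=944,
#       max_frequency=1420,
#       scan_statistics=True)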
def use_and_generate(
        host, port, bucket_name, frequency_width, w_projection_planes, volume,
        add_shutdown, min_frequency, max_frequency, scan_statistics, dump_json):
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        connection = httplib.HTTPConnection(host, port)
        connection.request('GET', '/api', None, {})
        response = connection.getresponse()
        if response.status != httplib.OK:
            msg = 'Error while processing GET request for {0}:{1}/api (status {2}): {3}'.format(
                host, port, response.status, response.read())
            raise Exception(msg)

        json_data = response.read()
        message_details = json.loads(json_data)
        host_list = message_details['hosts']

        nodes_running = get_nodes_running(host_list)
        if len(nodes_running) > 0:
            work_to_do = WorkToDo(
                width=frequency_width,
                bucket_name=bucket_name,
                s3_uvsub_name=get_s3_uvsub_name(frequency_width),
                s3_split_name=get_s3_split_name(frequency_width),
                min_frequency=min_frequency,
                max_frequency=max_frequency)
            work_to_do.calculate_work_to_do()

            # Now build the graph
            session_id = get_session_id()
            graph = BuildGraphUvsub(
                work_to_do=work_to_do.work_to_do,
                bucket_name=bucket_name,
                volume=volume,
                parallel_streams=PARALLEL_STREAMS,
                node_details=nodes_running,
                shutdown=add_shutdown,
                scan_statistics=scan_statistics,
                width=frequency_width,
                w_projection_planes=w_projection_planes,
                session_id=session_id,
                dim_ip=host)
            graph.build_graph()

            if dump_json:
                json_dumps = json.dumps(graph.drop_list, indent=2)
                with open("/tmp/json_uvsub.txt", "w") as json_file:
                    json_file.write(json_dumps)

            LOG.info('Connection to {0}:{1}'.format(host, port))
            client = DataIslandManagerClient(host, port)

            client.create_session(session_id)
            client.append_graph(session_id, graph.drop_list)
            client.deploy_session(session_id, get_roots(graph.drop_list))
        else:
            LOG.warning('No nodes are running')
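# Illustrative usage sketch (an assumption): run against a DALiuGE Data Island
# Manager that is already up and answering GET /api. The host, port, and
# remaining values are hypothetical placeholders, not project defaults.
#
#   use_and_generate(
#       host='dim.example.com',
#       port=8001,
#       bucket_name='my-chiles02-bucket',
#       frequency_width=4,
#       w_projection_planes=24,
#       volume='/mnt/daliuge/dlg_root',
#       add_shutdown=True,
#       min_frequency=944,
#       max_frequency=1420,
#       scan_statistics=True,
#       dump_json=False)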
def create_and_generate(
        bucket_name, frequency_width, w_projection_planes, ami_id, spot_price, volume,
        nodes, add_shutdown, min_frequency, max_frequency, scan_statistics, dump_json):
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        work_to_do = WorkToDo(
            width=frequency_width,
            bucket_name=bucket_name,
            s3_uvsub_name=get_s3_uvsub_name(frequency_width),
            s3_split_name=get_s3_split_name(frequency_width),
            min_frequency=min_frequency,
            max_frequency=max_frequency)
        work_to_do.calculate_work_to_do()

        nodes_required, node_count = get_nodes_required(nodes, spot_price)
        if len(nodes_required) > 0:
            uuid = get_uuid()
            ec2_data = EC2Controller(
                ami_id,
                nodes_required,
                get_node_manager_user_data(boto_data, uuid, max_request_size=50),
                AWS_REGION,
                tags=[
                    {
                        'Key': 'Owner',
                        'Value': getpass.getuser(),
                    },
                    {
                        'Key': 'Name',
                        'Value': 'DALiuGE NM - Uvsub',
                    },
                    {
                        'Key': 'uuid',
                        'Value': uuid,
                    }
                ]
            )
            ec2_data.start_instances()

            reported_running = get_reported_running(
                uuid,
                node_count,
                wait=600
            )
            if len(reported_running) == 0:
                LOG.error('Nothing has reported ready')
            else:
                hosts = build_hosts(reported_running)

                # Create the Data Island Manager
                data_island_manager = EC2Controller(
                    ami_id,
                    [
                        {
                            'number_instances': 1,
                            'instance_type': 'm4.large',
                            'spot_price': spot_price
                        }
                    ],
                    get_data_island_manager_user_data(boto_data, hosts, uuid, max_request_size=50),
                    AWS_REGION,
                    tags=[
                        {
                            'Key': 'Owner',
                            'Value': getpass.getuser(),
                        },
                        {
                            'Key': 'Name',
                            'Value': 'DALiuGE DIM - Uvsub',
                        },
                        {
                            'Key': 'uuid',
                            'Value': uuid,
                        }
                    ]
                )
                data_island_manager.start_instances()

                data_island_manager_running = get_reported_running(
                    uuid,
                    1,
                    wait=600
                )
                if len(data_island_manager_running['m4.large']) == 1:
                    # Now build the graph
                    session_id = get_session_id()
                    instance_details = data_island_manager_running['m4.large'][0]
                    host = instance_details['ip_address']
                    graph = BuildGraphUvsub(
                        work_to_do=work_to_do.work_to_do,
                        bucket_name=bucket_name,
                        volume=volume,
                        parallel_streams=PARALLEL_STREAMS,
                        node_details=reported_running,
                        shutdown=add_shutdown,
                        scan_statistics=scan_statistics,
                        width=frequency_width,
                        w_projection_planes=w_projection_planes,
                        session_id=session_id,
                        dim_ip=host)
                    graph.build_graph()

                    if dump_json:
                        json_dumps = json.dumps(graph.drop_list, indent=2)
                        with open("/tmp/json_uvsub.txt", "w") as json_file:
                            json_file.write(json_dumps)

                    LOG.info('Connection to {0}:{1}'.format(host, DIM_PORT))
                    client = DataIslandManagerClient(host, DIM_PORT)

                    client.create_session(session_id)
                    client.append_graph(session_id, graph.drop_list)
                    client.deploy_session(session_id, get_roots(graph.drop_list))
    else:
        LOG.error('Unable to find the AWS credentials')
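# Illustrative usage sketch (an assumption): provision spot instances for the
# node managers, start a Data Island Manager, and deploy the uvsub graph. The
# AMI id, spot price, and node count are hypothetical placeholders.
#
#   create_and_generate(
#       bucket_name='my-chiles02-bucket',
#       frequency_width=4,
#       w_projection_planes=24,
#       ami_id='ami-0123456789abcdef0',
#       spot_price=0.50,
#       volume='/mnt/daliuge/dlg_root',
#       nodes=8,
#       add_shutdown=True,
#       min_frequency=944,
#       max_frequency=1420,
#       scan_statistics=True,
#       dump_json=False)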