def command_json(args):
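    # Dry run: work out which frequency ranges still need cleaning, build the
    # clean graph, and dump it as JSON for inspection.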
    work_to_do = WorkToDo(args.width, args.bucket, get_s3_clean_name(args.width, args.iterations, args.arcsec))
    work_to_do.calculate_work_to_do()

    node_details = {
        'i2.4xlarge': ['node_{0}'.format(i) for i in range(0, args.nodes)]
    }
    graph = BuildGraphClean(
        work_to_do.work_to_do,
        args.bucket,
        args.volume,
        args.parallel_streams,
        node_details,
        args.shutdown,
        args.width,
        args.iterations,
        args.arcsec,
        args.only_image,
        'session_id',
        '1.2.3.4')
    graph.build_graph()
    json_dumps = json.dumps(graph.drop_list, indent=2)
    LOG.info(json_dumps)
    with open("/tmp/json_clean.txt", "w") as json_file:
        json_file.write(json_dumps)
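command_json pulls everything off an argparse namespace. A minimal sketch of the parser it assumes, with argument names inferred from the attributes accessed above and defaults that are purely hypothetical:

import argparse

def parse_arguments():
    # Hypothetical wiring: names mirror the attributes command_json reads;
    # defaults and help strings are illustrative, not from the project.
    parser = argparse.ArgumentParser(description='Dump the clean graph as JSON')
    parser.add_argument('bucket', help='S3 bucket holding the data')
    parser.add_argument('--width', type=int, default=4, help='frequency width')
    parser.add_argument('--iterations', type=int, default=10, help='clean iterations')
    parser.add_argument('--arcsec', default='2', help='cell size in arcsec')
    parser.add_argument('--nodes', type=int, default=1, help='worker node count')
    parser.add_argument('--volume', default='/mnt/dlg', help='scratch volume')
    parser.add_argument('--parallel_streams', type=int, default=4, help='parallel S3 streams')
    parser.add_argument('--shutdown', action='store_true', help='add shutdown drops')
    parser.add_argument('--only_image', action='store_true', help='only run the imaging step')
    return parser.parse_args()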
def use_and_generate(host, port, bucket_name, frequency_width, volume, add_shutdown, iterations):
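    # Drive an already-running deployment: ask the island manager which nodes
    # are up, then build the clean graph and deploy it across them.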
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        connection = httplib.HTTPConnection(host, port)
        connection.request('GET', '/api', None, {})
        response = connection.getresponse()
        if response.status != httplib.OK:
            msg = 'Error while processing GET request for {0}:{1}/api (status {2}): {3}'.format(host, port, response.status, response.read())
            raise Exception(msg)

        json_data = response.read()
        message_details = json.loads(json_data)
        host_list = message_details['hosts']

        nodes_running = get_nodes_running(host_list)
        if len(nodes_running) > 0:
            work_to_do = WorkToDo(frequency_width, bucket_name, get_s3_clean_name(frequency_width, iterations))
            work_to_do.calculate_work_to_do()

            # Now build the graph
            session_id = get_session_id()
            graph = BuildGraphClean(
                work_to_do.work_to_do, bucket_name, volume, PARALLEL_STREAMS,
                nodes_running, add_shutdown, frequency_width, iterations,
                session_id)
            graph.build_graph()

            LOG.info('Connection to {0}:{1}'.format(host, port))
            client = DataIslandManagerClient(host, port)

            client.create_session(session_id)
            client.append_graph(session_id, graph.drop_list)
            client.deploy_session(session_id, graph.start_oids)

        else:
            LOG.warning('No nodes are running')
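These examples target Python 2 (httplib, httplib.OK). For reference, the same status check under Python 3's http.client, a straight port with nothing project-specific added, would read:

import http.client
import json

def fetch_host_list(host, port):
    # Python 3 port of the GET /api call above.
    connection = http.client.HTTPConnection(host, port)
    connection.request('GET', '/api', None, {})
    response = connection.getresponse()
    body = response.read()  # read once; the body is consumed on first read
    if response.status != http.client.OK:
        raise Exception('GET {0}:{1}/api failed with status {2}: {3}'.format(
            host, port, response.status, body))
    return json.loads(body)['hosts']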
Example 3
def generate_json(
        width,
        bucket,
        iterations,
        arcsec,
        nodes,
        volume,
        parallel_streams,
        shutdown,
        w_projection_planes,
        robust,
        image_size,
        clean_channel_average,
        min_frequency,
        max_frequency,
        clean_directory_name,
        only_image):
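    # Fully parameterised dry run: every graph setting is an explicit keyword
    # argument instead of coming from an argparse namespace.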
    work_to_do = WorkToDo(
        width,
        bucket,
        get_s3_clean_name(width, iterations, arcsec) if clean_directory_name is None else clean_directory_name,
        min_frequency,
        max_frequency
    )
    work_to_do.calculate_work_to_do()

    node_details = {
        'i2.4xlarge': [{'ip_address': 'node_i2_{0}'.format(i)} for i in range(0, nodes)]
    }
    graph = BuildGraphClean(
        work_to_do=work_to_do.work_to_do,
        bucket_name=bucket,
        volume=volume,
        parallel_streams=parallel_streams,
        node_details=node_details,
        shutdown=shutdown,
        width=width,
        iterations=iterations,
        arcsec=arcsec + 'arcsec',
        w_projection_planes=w_projection_planes,
        robust=robust,
        image_size=image_size,
        clean_channel_average=clean_channel_average,
        clean_directory_name=clean_directory_name,
        only_image=only_image,
        session_id='session_id',
        dim_ip='1.2.3.4')
    graph.build_graph()
    json_dumps = json.dumps(graph.drop_list, indent=2)
    LOG.info(json_dumps)
    with open("/tmp/json_clean.txt", "w") as json_file:
        json_file.write(json_dumps)
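Each of these dry-run variants writes the serialised drop list to /tmp/json_clean.txt. A quick sanity check is to load it back and count the drops:

import json

# Load the graph dumped above and report its size.
with open('/tmp/json_clean.txt') as json_file:
    drop_list = json.load(json_file)
print('Graph contains {0} drops'.format(len(drop_list)))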
Example 4
def command_json(args):
    work_to_do = WorkToDo(
        args.width, args.bucket,
        get_s3_clean_name(args.width, args.iterations, args.arcsec))
    work_to_do.calculate_work_to_do()

    node_details = {
        'i2.4xlarge': ['node_{0}'.format(i) for i in range(0, args.nodes)]
    }
    graph = BuildGraphClean(work_to_do.work_to_do, args.bucket, args.volume,
                            args.parallel_streams, node_details, args.shutdown,
                            args.width, args.iterations, args.arcsec,
                            args.only_image, 'session_id', '1.2.3.4')
    graph.build_graph()
    json_dumps = json.dumps(graph.drop_list, indent=2)
    LOG.info(json_dumps)
    with open("/tmp/json_clean.txt", "w") as json_file:
        json_file.write(json_dumps)
Example 5
def use_and_generate(host, port, bucket_name, frequency_width, volume,
                     add_shutdown, iterations, arcsec, only_image):
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        connection = httplib.HTTPConnection(host, port)
        connection.request('GET', '/api', None, {})
        response = connection.getresponse()
        if response.status != httplib.OK:
            msg = 'Error while processing GET request for {0}:{1}/api (status {2}): {3}'.format(
                host, port, response.status, response.read())
            raise Exception(msg)

        json_data = response.read()
        message_details = json.loads(json_data)
        host_list = message_details['hosts']

        nodes_running = get_nodes_running(host_list)
        if len(nodes_running) > 0:
            work_to_do = WorkToDo(
                frequency_width, bucket_name,
                get_s3_clean_name(frequency_width, iterations, arcsec))
            work_to_do.calculate_work_to_do()

            # Now build the graph
            session_id = get_session_id()
            graph = BuildGraphClean(work_to_do.work_to_do, bucket_name, volume,
                                    PARALLEL_STREAMS, nodes_running,
                                    add_shutdown, frequency_width, iterations,
                                    arcsec, only_image, session_id, host)
            graph.build_graph()

            LOG.info('Connection to {0}:{1}'.format(host, port))
            client = DataIslandManagerClient(host, port)

            client.create_session(session_id)
            client.append_graph(session_id, graph.drop_list)
            client.deploy_session(session_id, graph.start_oids)

        else:
            LOG.warning('No nodes are running')
Example 6
def use_and_generate(
        host,
        port,
        bucket_name,
        frequency_width,
        volume,
        add_shutdown,
        iterations,
        arcsec,
        w_projection_planes,
        robust,
        image_size,
        clean_channel_average,
        min_frequency,
        max_frequency,
        clean_directory_name,
        only_image):
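    # Later revision of use_and_generate: adds the imaging parameters, an
    # optional clean_directory_name override, and deploys from get_roots().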
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        connection = httplib.HTTPConnection(host, port)
        connection.request('GET', '/api', None, {})
        response = connection.getresponse()
        if response.status != httplib.OK:
            msg = 'Error while processing GET request for {0}:{1}/api (status {2}): {3}'.format(
                host, port, response.status, response.read())
            raise Exception(msg)

        json_data = response.read()
        message_details = json.loads(json_data)
        host_list = message_details['hosts']

        nodes_running = get_nodes_running(host_list)
        if len(nodes_running) > 0:
            work_to_do = WorkToDo(
                frequency_width,
                bucket_name,
                get_s3_clean_name(frequency_width, iterations, arcsec) if clean_directory_name is None else clean_directory_name,
                min_frequency,
                max_frequency
            )
            work_to_do.calculate_work_to_do()

            # Now build the graph
            session_id = get_session_id()
            graph = BuildGraphClean(
                work_to_do=work_to_do.work_to_do,
                bucket_name=bucket_name,
                volume=volume,
                parallel_streams=PARALLEL_STREAMS,
                node_details=nodes_running,
                shutdown=add_shutdown,
                width=frequency_width,
                iterations=iterations,
                arcsec=arcsec,
                w_projection_planes=w_projection_planes,
                robust=robust,
                image_size=image_size,
                clean_channel_average=clean_channel_average,
                clean_directory_name=clean_directory_name,
                only_image=only_image,
                session_id=session_id,
                dim_ip=host)
            graph.build_graph()

            # TODO: Save the run parameters

            LOG.info('Connection to {0}:{1}'.format(host, port))
            client = DataIslandManagerClient(host, port)

            client.create_session(session_id)
            client.append_graph(session_id, graph.drop_list)
            client.deploy_session(session_id, get_roots(graph.drop_list))

        else:
            LOG.warning('No nodes are running')
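This revision deploys from get_roots(graph.drop_list) where the earlier ones used graph.start_oids. get_roots is not shown in these examples; assuming each drop is a dict whose upstream links sit in 'inputs' (app drops) or 'producers' (data drops) lists, as in a typical DALiuGE drop spec, a sketch might be:

def get_roots(drop_list):
    # Hypothetical sketch: a root drop has nothing upstream of it. The key
    # names here are assumptions; the real helper may inspect other fields.
    return [drop['oid'] for drop in drop_list
            if not drop.get('inputs') and not drop.get('producers')]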
Example 7
def create_and_generate(
        bucket_name,
        frequency_width,
        ami_id,
        spot_price,
        volume,
        frequencies_per_node,
        add_shutdown,
        iterations,
        arcsec,
        w_projection_planes,
        robust,
        image_size,
        clean_channel_average,
        min_frequency,
        max_frequency,
        clean_directory_name,
        only_image,
        log_level):
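    # Full provisioning path: start spot-priced node managers, wait for them to
    # report running, stand up a Data Island Manager, then deploy the graph.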
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        work_to_do = WorkToDo(
            frequency_width,
            bucket_name,
            get_s3_clean_name(frequency_width, iterations, arcsec) if clean_directory_name is None else clean_directory_name,
            min_frequency,
            max_frequency
        )
        work_to_do.calculate_work_to_do()

        nodes_required, node_count = get_nodes_required(work_to_do.work_to_do, frequencies_per_node, spot_price)

        if len(nodes_required) > 0:
            uuid = get_uuid()
            ec2_data = EC2Controller(
                ami_id,
                nodes_required,
                get_node_manager_user_data(boto_data, uuid, log_level=log_level),
                AWS_REGION,
                tags=[
                    {
                        'Key': 'Owner',
                        'Value': getpass.getuser(),
                    },
                    {
                        'Key': 'Name',
                        'Value': 'DALiuGE NM - Clean',
                    },
                    {
                        'Key': 'uuid',
                        'Value': uuid,
                    }
                ]
            )
            ec2_data.start_instances()

            reported_running = get_reported_running(
                uuid,
                node_count,
                wait=600
            )

            if len(reported_running) == 0:
                LOG.error('Nothing has reported ready')
            else:
                hosts = build_hosts(reported_running)

                # Create the Data Island Manager
                data_island_manager = EC2Controller(
                    ami_id,
                    [
                        {
                            'number_instances': 1,
                            'instance_type': 'm4.large',
                            'spot_price': spot_price
                        }
                    ],
                    get_data_island_manager_user_data(boto_data, hosts, uuid, need_node_manager=True, log_level=log_level),
                    AWS_REGION,
                    tags=[
                        {
                            'Key': 'Owner',
                            'Value': getpass.getuser(),
                        },
                        {
                            'Key': 'Name',
                            'Value': 'DALiuGE DIM - Clean',
                        },
                        {
                            'Key': 'uuid',
                            'Value': uuid,
                        }
                    ]
                )
                data_island_manager.start_instances()
                data_island_manager_running = get_reported_running(
                    uuid,
                    1,
                    wait=600
                )

                if len(data_island_manager_running['m4.large']) == 1:
                    # Now build the graph
                    session_id = get_session_id()
                    instance_details = data_island_manager_running['m4.large'][0]
                    host = instance_details['ip_address']
                    graph = BuildGraphClean(
                        work_to_do=work_to_do.work_to_do,
                        bucket_name=bucket_name,
                        volume=volume,
                        parallel_streams=PARALLEL_STREAMS,
                        node_details=reported_running,
                        shutdown=add_shutdown,
                        width=frequency_width,
                        iterations=iterations,
                        arcsec=arcsec,
                        w_projection_planes=w_projection_planes,
                        robust=robust,
                        image_size=image_size,
                        clean_channel_average=clean_channel_average,
                        clean_directory_name=clean_directory_name,
                        only_image=only_image,
                        session_id=session_id,
                        dim_ip=host)
                    graph.build_graph()

                    # TODO: Save the run parameters

                    LOG.info('Connection to {0}:{1}'.format(host, DIM_PORT))
                    client = DataIslandManagerClient(host, DIM_PORT)

                    client.create_session(session_id)
                    client.append_graph(session_id, graph.drop_list)
                    client.deploy_session(session_id, get_roots(graph.drop_list))
    else:
        LOG.error('Unable to find the AWS credentials')
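Example 8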
def create_and_generate(bucket_name, frequency_width, ami_id, spot_price,
                        volume, frequencies_per_node, add_shutdown, iterations,
                        arcsec, only_image, log_level):
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        work_to_do = WorkToDo(frequency_width, bucket_name, get_s3_clean_name(frequency_width, iterations, arcsec))
        work_to_do.calculate_work_to_do()

        nodes_required, node_count = get_nodes_required(work_to_do.work_to_do, frequencies_per_node, spot_price)

        if len(nodes_required) > 0:
            uuid = get_uuid()
            ec2_data = EC2Controller(
                ami_id,
                nodes_required,
                get_node_manager_user_data(boto_data, uuid, log_level=log_level),
                AWS_REGION,
                tags=[
                    {
                        'Key': 'Owner',
                        'Value': getpass.getuser(),
                    },
                    {
                        'Key': 'Name',
                        'Value': 'Daliuge NM - Clean',
                    },
                    {
                        'Key': 'uuid',
                        'Value': uuid,
                    }
                ]
            )
            ec2_data.start_instances()

            reported_running = get_reported_running(
                uuid,
                node_count,
                wait=600
            )

            if len(reported_running) == 0:
                LOG.error('Nothing has reported ready')
            else:
                hosts = build_hosts(reported_running)

                # Create the Data Island Manager
                data_island_manager = EC2Controller(
                    ami_id,
                    [
                        {
                            'number_instances': 1,
                            'instance_type': 'm4.large',
                            'spot_price': spot_price
                        }
                    ],
                    get_data_island_manager_user_data(boto_data, hosts, uuid, need_node_manager=True, log_level=log_level),
                    AWS_REGION,
                    tags=[
                        {
                            'Key': 'Owner',
                            'Value': getpass.getuser(),
                        },
                        {
                            'Key': 'Name',
                            'Value': 'Daliuge DIM - Clean',
                        },
                        {
                            'Key': 'uuid',
                            'Value': uuid,
                        }
                    ]
                )
                data_island_manager.start_instances()
                data_island_manager_running = get_reported_running(
                    uuid,
                    1,
                    wait=600
                )

                if len(data_island_manager_running['m4.large']) == 1:
                    # Now build the graph
                    session_id = get_session_id()
                    instance_details = data_island_manager_running['m4.large'][0]
                    host = instance_details['ip_address']
                    graph = BuildGraphClean(
                        work_to_do.work_to_do,
                        bucket_name,
                        volume,
                        PARALLEL_STREAMS,
                        reported_running,
                        add_shutdown,
                        frequency_width,
                        iterations,
                        arcsec,
                        only_image,
                        session_id,
                        host)
                    graph.build_graph()

                    LOG.info('Connection to {0}:{1}'.format(host, DIM_PORT))
                    client = DataIslandManagerClient(host, DIM_PORT)

                    client.create_session(session_id)
                    client.append_graph(session_id, graph.drop_list)
                    client.deploy_session(session_id, graph.start_oids)
    else:
        LOG.error('Unable to find the AWS credentials')
Example 9
def create_and_generate(bucket_name, frequency_width, ami_id, spot_price,
                        volume, frequencies_per_node, add_shutdown, iterations,
                        arcsec, only_image, log_level):
    boto_data = get_aws_credentials('aws-chiles02')
    if boto_data is not None:
        work_to_do = WorkToDo(
            frequency_width, bucket_name,
            get_s3_clean_name(frequency_width, iterations, arcsec))
        work_to_do.calculate_work_to_do()

        nodes_required, node_count = get_nodes_required(
            work_to_do.work_to_do, frequencies_per_node, spot_price)

        if len(nodes_required) > 0:
            uuid = get_uuid()
            ec2_data = EC2Controller(ami_id,
                                     nodes_required,
                                     get_node_manager_user_data(
                                         boto_data, uuid, log_level=log_level),
                                     AWS_REGION,
                                     tags=[{
                                         'Key': 'Owner',
                                         'Value': getpass.getuser(),
                                     }, {
                                         'Key': 'Name',
                                         'Value': 'Daliuge NM - Clean',
                                     }, {
                                         'Key': 'uuid',
                                         'Value': uuid,
                                     }])
            ec2_data.start_instances()

            reported_running = get_reported_running(uuid, node_count, wait=600)

            if len(reported_running) == 0:
                LOG.error('Nothing has reported ready')
            else:
                hosts = build_hosts(reported_running)

                # Create the Data Island Manager
                data_island_manager = EC2Controller(
                    ami_id, [{
                        'number_instances': 1,
                        'instance_type': 'm4.large',
                        'spot_price': spot_price
                    }],
                    get_data_island_manager_user_data(boto_data,
                                                      hosts,
                                                      uuid,
                                                      need_node_manager=True,
                                                      log_level=log_level),
                    AWS_REGION,
                    tags=[{
                        'Key': 'Owner',
                        'Value': getpass.getuser(),
                    }, {
                        'Key': 'Name',
                        'Value': 'Daliuge DIM - Clean',
                    }, {
                        'Key': 'uuid',
                        'Value': uuid,
                    }])
                data_island_manager.start_instances()
                data_island_manager_running = get_reported_running(uuid, 1, wait=600)

                if len(data_island_manager_running['m4.large']) == 1:
                    # Now build the graph
                    session_id = get_session_id()
                    instance_details = data_island_manager_running['m4.large'][0]
                    host = instance_details['ip_address']
                    graph = BuildGraphClean(work_to_do.work_to_do, bucket_name,
                                            volume, PARALLEL_STREAMS,
                                            reported_running, add_shutdown,
                                            frequency_width, iterations,
                                            arcsec, only_image, session_id,
                                            host)
                    graph.build_graph()

                    LOG.info('Connection to {0}:{1}'.format(host, DIM_PORT))
                    client = DataIslandManagerClient(host, DIM_PORT)

                    client.create_session(session_id)
                    client.append_graph(session_id, graph.drop_list)
                    client.deploy_session(session_id, graph.start_oids)
    else:
        LOG.error('Unable to find the AWS credentials')
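get_reported_running is used throughout but never defined here. Since every instance is launched with a 'uuid' tag, the instances for a run are at least discoverable through the EC2 API; purely as an illustration (the real helper may instead wait on instances reporting in over some channel):

import boto3

def find_instances_by_uuid(uuid, region):
    # Illustration only: list running instances carrying the 'uuid' tag that
    # the examples attach at launch (pass AWS_REGION for region).
    ec2 = boto3.client('ec2', region_name=region)
    reservations = ec2.describe_instances(
        Filters=[
            {'Name': 'tag:uuid', 'Values': [uuid]},
            {'Name': 'instance-state-name', 'Values': ['running']},
        ]
    )['Reservations']
    return [instance
            for reservation in reservations
            for instance in reservation['Instances']]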