Example #1
def check(event, context):
    try:
        # Fetch data
        data = fetch_post_data(event)
        infer_config = fetch_inference_json()

        # Check if model is currently training
        server_status_config = fetch_status()
        if server_status_config['status'] == 'active' and server_status_config['token'] == data['token']:
            return create_response({
                'result': 'error',
                'message': 'The model is currently training. Please try again after a few minutes.'
            })

        # Check if token exists
        if data['token'] not in infer_config:
            return create_response({
                'result': 'error',
                'message': 'No such token found.'
            })

        task_config = infer_config[data['token']]
        return create_response({
            'result': 'success',
            'taskType': task_config['task_type'],
            'accuracy': task_config['accuracy'],
            'accuracyPlot': task_config['accuracy_plot'],
        })
    except Exception as e:
        print(repr(e))
        return create_response({
            'result': 'internal_error',
            'message': repr(e),
        }, status_code=500)
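
The Lambda handlers in this group (check, train, status, clean, inference, server_start, server_stop) all return through a create_response helper that is not shown here. A minimal sketch of what such a helper might look like, assuming an API Gateway proxy-style integration and a JSON body, is:

import json

def create_response(body, status_code=200):
    # Hypothetical helper (not the original): wrap a dict as an
    # API Gateway proxy response with a JSON body.
    return {
        'statusCode': status_code,
        'headers': {'Content-Type': 'application/json'},
        'body': json.dumps(body),
    }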
Example #2
def index(request):
    if request.method == 'POST' and 'csvFile' in request.FILES:
        csv = request.FILES['csvFile']
        csv_file_name = str(request.FILES['csvFile'])
        parsed_csv = parse_csv(csv)
        if parsed_csv == 'Not a CSV':
            return JsonResponse({"notCsv": "true"})
        header = parsed_csv.pop(0)
        response_data = create_response(parsed_csv, header, False)
        context = {"html": response_data['html'], "data": response_data["unique_mpn"]}
        return JsonResponse(context)
    elif len(request.POST.getlist('rawHTML')) != 0:
        html = request.POST.getlist("rawHTML")[0]
        parsed_html = parse_html(html)
        head = [value.get_text() for value in parsed_html['header']]
        table = get_html_text(parsed_html['rows'])
        if request.POST.getlist('sort')[0] == "false":
            file_name = request.POST.getlist("filename")[0]
            for row in table[2:-1]:
                Build_Data.objects.create(
                    file_name=file_name, designator=row[0], footprint=row[1],
                    mid_x=row[2], mid_y=row[3], ref_x=row[4], ref_y=row[5],
                    pad_y=row[6], pad_x=row[7], layer=row[8], rotation=row[9],
                    comment=row[10])
            return HttpResponse("")
        else:
            response_data = create_response(table, head, True)
            context = {"html": response_data['html'], "data": response_data["unique_mpn"]}
            return JsonResponse(context)
    return render(request, 'home.html')
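
For reference, the positional indexing in the Build_Data.objects.create call above implies a fixed column order for each table row. A hypothetical row matching that order (all values invented) would be:

# Hypothetical pick-and-place row; the order mirrors row[0]..row[10] above.
row = ['C1', 'CAP-0603',        # designator, footprint
       '12.50', '30.10',        # mid_x, mid_y
       '12.50', '30.10',        # ref_x, ref_y
       '0.80', '0.90',          # pad_y, pad_x
       'TopLayer', '90',        # layer, rotation
       '100nF']                 # comment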
Example #3
def train(event, context):
    try:
        server_status = fetch_status()
        if server_status['status'] == 'active':
            return create_response({
                'result': 'error',
                'message': 'Server is busy.',
            })

        # Fetch data
        data = fetch_post_data(event)

        # Check if the server has been properly shut down
        if EC2_RESOURCE.Instance(INSTANCE_ID).state['Name'] == 'stopping':
            return create_response({
                'result': 'error',
                'message': 'Server is currently training another model, please check back in 5 minutes.'
            })

        # Get number of classes and validate data
        if data['taskType'].lower() == 'sentimentanalysis':
            validation_response = validate_csv(data['dataset'])
            if validation_response['is_valid']:
                data['dataset'] = validation_response['data']
                data['numClasses'] = validation_response['num_classes']
            else:
                return create_response({
                    'result': 'error',
                    'message': validation_response['message'],
                })
        else:
            data['numClasses'] = len(data['dataset'])

        # Create token
        token = create_user_token(
            data['taskType'], data['taskName']
        )
        print('Token:', token)

        # Change server status to active
        change_server_status(
            'active', server_status['dev_mode'],
            task_type=data['taskType'].lower(), token=token
        )

        # Initialize training process
        create_training_json(token, data)

        return create_response({
            'result': 'success',
            'token': token
        })
    except Exception as e:
        print(repr(e))
        return create_response({
            'result': 'internal_error',
            'message': repr(e),
        }, status_code=500)
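
Based only on the keys accessed above, a hypothetical request body for train() might look like the following; the exact dataset format depends on validate_csv, which is not shown, and all values are invented:

# Hypothetical POST body for train(); keys taken from the handler above.
example_body = {
    'taskType': 'sentimentAnalysis',
    'taskName': 'demo-sentiment-task',
    'dataset': 'text,label\ngreat product,positive\nbroke quickly,negative',
}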
Example #4
def run(data):
    has_error = False
    logging.info("started run.")

    # CONVERT STREAM ANALYTICS TO SKTIME FORMAT
    logging.info("loading json.")
    data = json.loads(data)
    logging.info("json loaded.")

    # Parse timestamps and temperature data
    time_created_start = data.get("allevents")[0].get("timeCreated")
    time_created_end = data.get("allevents")[-1].get("timeCreated")
    temperature_data = [
        event.get("temperature") for event in data.get("allevents")
    ]

    logging.info(f"time_created_start: {time_created_start}")
    logging.info(f"time_created_end: {time_created_end}")
    logging.info(f"temperature_data: {temperature_data}")

    # Check connection_device_id
    connection_device_id, has_error, error_message = get_connection_device_id(
        data)
    if has_error:
        return create_response(has_error=has_error,
                               error_message=error_message)

    # Assert time series has at least TIMESERIESLENGTH elements
    if len(temperature_data) < TIMESERIESLENGTH:
        error_message = f"Time series of length {len(temperature_data)} does not have enough samples ({TIMESERIESLENGTH} samples required)."
        logging.warning(error_message)
        return create_response(has_error=True, error_message=error_message)

    # Convert data to sktime format
    case_id, dim_id = 0, 0
    try:
        long_data = [[case_id, dim_id, reading_id, reading_data]
                     for reading_id, reading_data in enumerate(
                         temperature_data[-TIMESERIESLENGTH:])]
    except Exception as e:
        error_message = (
            f"Could not convert dataset to long format due to exception: '{e}'"
        )
        logging.error(error_message)
        return create_response(has_error=True, error_message=error_message)

    # Predict
    long_df = pd.DataFrame(
        long_data, columns=["case_id", "dim_id", "reading_id", "value"])
    sktime_df = from_long_to_nested(long_df)
    prediction = model.predict(sktime_df).tolist()[0]

    return create_response(
        prediction=prediction,
        connection_device_id=connection_device_id,
        time_created_start=time_created_start,
        time_created_end=time_created_end,
    )
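
A minimal invocation sketch for run(), assuming TIMESERIESLENGTH, model, and the sktime imports are configured at module level (field names come from the code above, values are invented):

import json

# Hypothetical payload; run() expects a JSON string with an "allevents" list.
payload = json.dumps({
    "allevents": [
        {"timeCreated": "2021-01-01T00:00:00Z", "temperature": 21.5},
        {"timeCreated": "2021-01-01T00:01:00Z", "temperature": 21.7},
        # ... at least TIMESERIESLENGTH events in total
    ]
})
result = run(payload)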
Example #5
def handler(event, context):
    logger.info(f'Received event: {event}')
    headers = event.get('headers', {})
    request_body = json.loads(event['body'])

    record_id = str(uuid.uuid4().hex)
    message = request_body.get('message')
    output_format = request_body.get('outputFormat') or default_format
    sample_rate = request_body.get('sampleRate')

    if output_format not in supported_formats:
        return create_response(
            headers, None, f"Invalid output format '{output_format}'. \
              Supported formats are: {supported_formats}")

    valid_sample_rates = supported_formats[output_format]
    sample_rate = sample_rate or min(valid_sample_rates)

    if sample_rate not in valid_sample_rates:
        return create_response(
            headers, None, f"Invalid SampleRate '{sample_rate}'. \
             Allowed values are {valid_sample_rates}")

    voices = (valid_voices
              or polly.describe_voices(LanguageCode=language_code)['Voices'])

    voice_id = request_body.get('voiceId') or random.choice(voices)['Id']
    if voice_id not in [x['Id'] for x in voices]:
        return create_response(
            headers, None,
            f"Invalid voiceId '{voice_id}' for LanguageCode '{language_code}'")

    # Create tracking record in DynamoDB
    record = {
        'id': record_id,
        'taskStatus': 'QUEUED',
        'message': message,
        'outputFormat': output_format,
        'sampleRate': sample_rate,
        'voiceId': voice_id,
        'languageCode': language_code,
        'expiration': int(time()) + time_to_live
    }

    try:
        messages.put_item(Item=record)
    except botocore.exceptions.ClientError as err:
        if err.response['Error']['Code'] == max_throughput_error:
            error_code = 429
            error_message = f"Maximum throughput reached. Please retry later."
        else:
            error_code = 400
            error_message = err.response['Error']

        return create_response(headers, None, error_message, error_code)

    return create_response(headers, record)
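
This handler depends on module-level configuration that is not shown (default_format, supported_formats, language_code, valid_voices, time_to_live, messages, max_throughput_error). A plausible sketch of the format map, assuming Amazon Polly's documented sample rates (check the Polly documentation for the authoritative values):

# Hypothetical module-level configuration; the real values are not shown above.
default_format = 'mp3'
supported_formats = {
    'mp3': ['8000', '16000', '22050', '24000'],
    'ogg_vorbis': ['8000', '16000', '22050', '24000'],
    'pcm': ['8000', '16000'],
}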
Example #6
def status(event, context):
    try:
        return create_response({
            'result': 'success',
            'status': fetch_status()['status'],
        })
    except Exception as e:
        print(repr(e))
        return create_response({
            'result': 'internal_error',
            'message': repr(e),
        }, status_code=500)
Example #7
def get_response(last_message, emb_type, clas_type, print_info=False):
    """
    Loads all data needed to create a response and creates it. The user can
    choose which embedding type and classifier will be used.
    """
    # Load data for responses
    qa_data = read_qa(qa_path='data/qa_data/assignment_data.json')

    # Load embedding model
    embedding_model = load_semantic_model(model_type=emb_type)

    # Load classifier
    classifier = load_classifier(classifier_type=clas_type, model_type=emb_type)

    # Generate response
    response, emotion, topic, word_intersection = create_response(
        last_message, qa_data, classifier, embedding_model, emb_type, clas_type)
    # Print info if wanted
    if print_info:
        print()
        print("Received: {message}".format(message=last_message))
        print("Responded: {response}".format(response=response))
        print("Topic detected: {topic}".format(topic=topic))
        print("Emotion detected: {emotion}".format(emotion=emotion))
        print("Keywords detected [(keyword): (message_token)]: \n\t{intersection}".format(intersection=word_intersection))

    return response, emotion, topic, word_intersection
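
A hypothetical call, assuming 'tfidf' and 'svm' are identifiers accepted by load_semantic_model and load_classifier (the real identifiers are not shown above):

# Hypothetical usage; the embedding and classifier names are invented.
response, emotion, topic, keywords = get_response(
    "I really enjoyed this assignment!", emb_type='tfidf',
    clas_type='svm', print_info=True)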
Example #8
def lambda_handler(event, context):
    ec2 = boto3.resource('ec2')

    instances = list(
        ec2.instances.filter(Filters=[{
            'Name':
            'instance-state-name',
            'Values':
            ['running', 'stopped', 'pending', 'shutting-down', 'stopping']
        }]))

    if len(instances) == 0:
        raise Exception('No instances found')

    result = {}

    for instance in instances:
        print(instance)
        result[instance.instance_id] = {
            "status": instance.state['Name'],
            "public_ip_address": instance.public_ip_address,
            "instance_type": instance.instance_type
        }

    return create_response(None, result)
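
This handler and the start/stop handler further below pass (error, result) pairs to create_response; the helper itself is not shown. A minimal sketch under that assumption:

import json

def create_response(error, result):
    # Hypothetical helper (not the original): the real response shape is unknown.
    return {
        'statusCode': 400 if error else 200,
        'body': json.dumps({'error': error, 'result': result}),
    }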
Example #9
 def _exec_get_table(self):
   table = core.overseer.path_preference_table._table
   result = [{
     "path_identifier": utils.serialize_path_identifier(path_identifier),
     "value": ApiHandler.REVERSED_OPTIONS[table[path_identifier]]
   } for path_identifier in table]
   return utils.create_response(result)
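
The ApiHandler examples that follow all return through utils.create_response and make_error, neither of which is shown. A rough sketch of the kind of JSON envelope they might build, purely as an assumption:

import json

def create_response(content):
    # Hypothetical: wrap a successful result for the overseer HTTP API.
    return json.dumps({"result": content})

def make_error(message):
    # Hypothetical: wrap an error message in the same envelope style.
    return json.dumps({"error": message})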
Example #10
 def _exec_get_entry(self, quintet):
     try:
         path_identifier = utils.deserialize_path_identifier(quintet)
         entry = core.overseer.path_preference_table._table[path_identifier]
         return utils.create_response(ApiHandler.REVERSED_OPTIONS[entry])
     except KeyError:
         return make_error("No such entry in path preference table")
Example #11
 def _exec_set_entry(self, quintet, option):
   try:
     path_identifier = utils.deserialize_path_identifier(quintet)
     core.overseer.path_preference_table._table[path_identifier] = ApiHandler.OPTIONS[option]
     return utils.create_response("")
   except KeyError:
     return make_error("No such entry in path preference table")
Example #12
    def test_without_validate(self):
        async def post(check, data=None):
            app, srv, url = await self.create_server()
            resp = await self.client.post(url, data=json.dumps(data))
            self.assertEqual(200, resp.status)
            self.assertEqual(check, (await resp.json()))
            self.assertEqual(None, (await resp.release()))

        self.loop.run_until_complete(
            post(create_response(None, {"a": "b"}), create_request("hello")))
        self.loop.run_until_complete(
            post(create_response(123, {"a": "b"}),
                 create_request("hello", 123)))
        self.loop.run_until_complete(
            post(create_response("123", {"a": "b"}),
                 create_request("hello", "123")))
Example #13
 def _exec_remove_entry(self, quintet):
     try:
         path_identifier = utils.deserialize_path_identifier(quintet)
         del core.overseer.path_preference_table._table[path_identifier]
         return utils.create_response("")
     except KeyError:
         return make_error("No such entry in path preference table")
Example #14
 def _exec_get_entry(self, quintet):
   try:
     path_identifier = utils.deserialize_path_identifier(quintet)
     entry = core.overseer.path_preference_table._table[path_identifier]
     return utils.create_response(ApiHandler.REVERSED_OPTIONS[entry])
   except KeyError:
     return make_error("No such entry in path preference table")
Example #15
 def _exec_remove_entry(self, quintet):
   try:
     path_identifier = utils.deserialize_path_identifier(quintet)
     del core.overseer.path_preference_table._table[path_identifier]
     return utils.create_response("")
   except KeyError:
     return make_error("No such entry in path preference table")
Example #16
 def _exec_set_entry(self, quintet, option):
     try:
         path_identifier = utils.deserialize_path_identifier(quintet)
         core.overseer.path_preference_table._table[
             path_identifier] = ApiHandler.OPTIONS[option]
         return utils.create_response("")
     except KeyError:
         return make_error("No such entry in path preference table")
Example #17
 def _exec_get_table(self):
     table = core.overseer.path_preference_table._table
     result = [{
         "path_identifier":
         utils.serialize_path_identifier(path_identifier),
         "value":
         ApiHandler.REVERSED_OPTIONS[table[path_identifier]]
     } for path_identifier in table]
     return utils.create_response(result)
Example #18
    def test_without_validate(self):
        @asyncio.coroutine
        def post(check, data=None):
            app, srv, url = yield from self.create_server()
            resp = yield from self.client.post(url, data=json.dumps(data))
            self.assertEqual(200, resp.status)
            self.assertEqual(check, (yield from resp.json()))
            self.assertEqual(None, (yield from resp.release()))

        self.loop.run_until_complete(
            post(create_response(None, {"a": "b"}),
                 create_request("hello")))
        self.loop.run_until_complete(
            post(create_response(123, {"a": "b"}),
                 create_request("hello", 123)))
        self.loop.run_until_complete(
            post(create_response("123", {"a": "b"}),
                 create_request("hello", "123")))
Example #19
def clean(event, context):
    try:
        # Check if cleanup is active
        clean_status_config = fetch_object('cleanup.json')
        if clean_status_config['status'] != 'active':
            print('Status inactive')
            return create_response({
                'result': 'error',
                'message': 'cleanup status is inactive.'
            })

        # Fetch inference data
        infer_config = fetch_object(INFERENCE_CONFIG)

        # Loop through configs
        print('Checking configs')
        safe_objects = {}
        current_time = datetime.now()
        for token, infer_vals in infer_config.items():
            if token not in WHITELIST_TOKENS:
                creation_time = datetime.strptime(infer_vals['created'], '%d-%m-%y %H:%M')
                if (current_time - creation_time).total_seconds() < 7200:  # 2 hours
                    safe_objects[token] = infer_vals
                else:  # Delete objects
                    delete_object(infer_vals['model_filename'])
                    if infer_vals['task_type'] == 'sentimentanalysis':
                        delete_object(infer_vals['metadata_filename'])
                    print('Deleted:', token)
            else:
                safe_objects[token] = infer_vals

        # Update inference json
        update_object(INFERENCE_CONFIG, safe_objects)

        return create_response({
            'result': 'success',
            'message': 'Old objects deleted'
        })
    except Exception as e:
        print(repr(e))
        return create_response({
            'result': 'internal_error',
            'message': repr(e),
        }, status_code=500)
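
Based only on the keys read above, a hypothetical entry in the inference config consumed by clean() might look like (values invented):

# Hypothetical inference-config entry; keys come from the handler above.
infer_config = {
    'some-token': {
        'created': '01-01-21 12:00',              # matches '%d-%m-%y %H:%M'
        'model_filename': 'models/some-token.pt',
        'task_type': 'sentimentanalysis',
        'metadata_filename': 'metadata/some-token.json',
    },
}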
Example #20
def inference(event, context):
    try:
        # Fetch data
        data = fetch_post_data(event)
        infer_config = fetch_inference_json()
        print('post data and inference config fetched')

        # Check if token exists
        if data['token'] not in infer_config:
            print(f'Token {data["token"]} not found')
            return create_response({
                'result': 'error',
                'message': 'No such token found.'
            })

        # Make predictions
        task_config = infer_config[data['token']]
        if task_config['task_type'] == 'classification':
            model = fetch_classification_model(task_config['model_filename'])
            output = classify(model, data['input'], task_config['classes'])
        else:
            model_path, model_metadata_path = fetch_sa_data(
                task_config['model_filename'],
                task_config['metadata_filename'],
            )
            output = get_sentiment(data['input'], model_path,
                                   model_metadata_path)

        return create_response({
            'result': 'success',
            'prediction': output,
        })
    except Exception as e:
        print(repr(e))
        return create_response(
            {
                'result': 'internal_error',
                'message': repr(e),
            },
            status_code=500)
Example #21
    def test_validate(self):
        async def post(check, data=None):
            app, srv, url = await self.create_server()
            resp = await self.client.post(url, data=json.dumps(data))
            self.assertEqual(200, resp.status)
            self.assertEqual(check, (await resp.json()))
            self.assertEqual(None, (await resp.release()))

        self.loop.run_until_complete(
            post(INVALID_PARAMS, create_request("v_hello")))
        self.loop.run_until_complete(
            post(create_response(result={"status": "ok"}),
                 create_request("v_hello", params={"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response(1234, {"status": "ok"}),
                 create_request("v_hello", 1234, {"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response(None, {"status": "ok"}),
                 create_request("v_hello", None, {"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response("1", {"status": "OK"}),
                 create_request("v_hello", "1", {"data": "TEST"})))
        self.loop.run_until_complete(
            post(create_response(True, {"status": "ok"}),
                 create_request("v_hello", True, {"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response(False, {"status": "ok"}),
                 create_request("v_hello", False, {"data": "ok"})))
Example #22
def lambda_handler(event, context):
    ec2 = boto3.resource('ec2')

    request_body = event

    if (request_body['secret'] != secret_password):
        return create_response('Incorrect secret. Please enter a valid secret to alter server settings', None)

    instance_id = request_body['instance_id']
    instances = list(ec2.instances.filter(InstanceIds=[instance_id]))

    if (len(instances) != 1):
        return create_response(f'Instance: {instance_id} not found', None)

    instance = instances[0]
    message = ''

    # AWS instance state codes reference:
    # 0 : pending
    # 16 : running
    # 32 : shutting-down
    # 48 : terminated
    # 64 : stopping
    # 80 : stopped

    if (instance.state['Name'] == 'running'):
        # shut er down
        instance.stop()
        message = f'Initiated stop server command on {instance_id}'

    elif (instance.state['Name'] == 'stopped'):
        # start er up
        instance.start()
        message = f'Initiated start server command on {instance_id}'

    else:
        # pending, shutting down, terminated, or stopping
        message = 'Server is busy starting, shutting down, terminating, or stopping. Please try again later'

    return create_response(None, message)
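
A hypothetical invocation event for this handler, with field names taken from the code above and invented values; secret_password and the instance must exist in the deployed environment:

# Hypothetical test event for lambda_handler.
event = {
    'secret': 'not-the-real-secret',
    'instance_id': 'i-0123456789abcdef0',
}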
Example #23
    def test_validate(self):
        @asyncio.coroutine
        def post(check, data=None):
            app, srv, url = yield from self.create_server()
            resp = yield from self.client.post(url, data=json.dumps(data))
            self.assertEqual(200, resp.status)
            self.assertEqual(check, (yield from resp.json()))
            self.assertEqual(None, (yield from resp.release()))

        self.loop.run_until_complete(post(INVALID_PARAMS,
                                          create_request("v_hello")))
        self.loop.run_until_complete(
            post(create_response(result={"status": "ok"}),
                 create_request("v_hello", params={"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response(1234, {"status": "ok"}),
                 create_request("v_hello", 1234, {"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response(None, {"status": "ok"}),
                 create_request("v_hello", None, {"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response("1", {"status": "OK"}),
                 create_request("v_hello", "1", {"data": "TEST"})))
        self.loop.run_until_complete(
            post(create_response(True, {"status": "ok"}),
                 create_request("v_hello", True, {"data": "ok"})))
        self.loop.run_until_complete(
            post(create_response(False, {"status": "ok"}),
                 create_request("v_hello", False, {"data": "ok"})))
Example #24
 def _update_weight(self, dpid1, dpid2, weight="weight", value="1"):
     try:
         graph = core.overseer_topology.graph
         dpid1 = int(dpid1, 16)
         dpid2 = int(dpid2, 16)
         value = float(value)
         if graph.has_edge(dpid1, dpid2):
             graph[dpid1][dpid2][weight] = value
             return utils.create_response("")
         else:
             return make_error("Invalid link")
     except ValueError:
         return make_error("dpid must be hex and value must be float")
Example #25
 def _update_weight(self, dpid1, dpid2, weight="weight", value="1"):
   try:
     graph = core.overseer_topology.graph
     dpid1 = int(dpid1, 16)
     dpid2 = int(dpid2, 16)
     value = float(value)
     if graph.has_edge(dpid1, dpid2):
       graph[dpid1][dpid2][weight] = value
       return utils.create_response("")
     else:
       return make_error("Invalid link")
   except ValueError:
     return make_error("dpid must be hex and value must be float")
Example #26
def server_start(event, context):
    message = 'Status not active. Server not turned on.'

    server_status = fetch_status()
    if server_status['dev_mode']:
        message = 'Dev mode is on.'
    elif server_status['status'] == 'active':
        ec2_client = boto3.client('ec2', region_name=REGION)
        ec2_client.start_instances(InstanceIds=[INSTANCE_ID])
        message = 'Instance started.'

    print(message)
    return create_response({
        'message': message
    })
Example #27
def server_stop(event, context):
    server_status = fetch_status()
    if server_status['dev_mode']:
        message = 'Dev mode is on.'
    else:
        # Stop instance
        ec2_client = boto3.client('ec2', region_name=REGION)
        ec2_client.stop_instances(InstanceIds=[INSTANCE_ID])
        message = 'Instance stopped.'

        # Change server status
        change_server_status('sleeping', server_status['dev_mode'])

    print(message)
    return create_response({
        'message': message
    })
Example #28
    def test_validate(self):
        @asyncio.coroutine
        def call(check, method, data=None, id=None):
            app, srv, client = yield from self.create_server(middlewares=[
                custom_errorhandler_middleware])

            resp = Response(**check)

            if not id:
                id = resp.id

            ret = yield from client.call(method, data, id=id)

            self.assertEqual(resp.error, ret.error)
            self.assertEqual(resp.result, ret.result)

            if resp.id:
                self.assertEqual(resp.id, ret.id)

        self.loop.run_until_complete(call(INVALID_PARAMS, "v_hello"))
        self.loop.run_until_complete(
            call(create_response(result={"status": "ok"}),
                 "v_hello", {"data": "ok"}))
        self.loop.run_until_complete(
            call(create_response(1234, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, 1234))
        self.loop.run_until_complete(
            call(create_response(None, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, None))
        self.loop.run_until_complete(
            call(create_response("1", {"status": "OK"}),
                 "v_hello", {"data": "TEST"}, "1"))
        self.loop.run_until_complete(
            call(create_response(True, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, True))
        self.loop.run_until_complete(
            call(create_response(False, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, False))

        self.loop.run_until_complete(call(NOT_FOUND, "not_found"))
        self.loop.run_until_complete(call(INVALID_PARAMS, "v_hello"))
        self.loop.run_until_complete(call(INTERNAL_ERROR, "err_exc"))
        self.loop.run_until_complete(call(INTERNAL_ERROR, "err_exc2"))
        self.loop.run_until_complete(call(CUSTOM_ERROR_GT, "err_gt"))
        self.loop.run_until_complete(call(CUSTOM_ERROR_LT, "err_lt"))
Example #29
    def test_validate(self):
        async def call(check, method, data=None, id=None):
            app, srv, client = await self.create_server(middlewares=[
                custom_errorhandler_middleware])

            resp = Response(**check)

            if not id:
                id = resp.id

            ret = await client.call(method, data, id=id)

            self.assertEqual(resp.error, ret.error)
            self.assertEqual(resp.result, ret.result)

            if resp.id:
                self.assertEqual(resp.id, ret.id)

        self.loop.run_until_complete(call(INVALID_PARAMS, "v_hello"))
        self.loop.run_until_complete(
            call(create_response(result={"status": "ok"}),
                 "v_hello", {"data": "ok"}))
        self.loop.run_until_complete(
            call(create_response(1234, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, 1234))
        self.loop.run_until_complete(
            call(create_response(None, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, None))
        self.loop.run_until_complete(
            call(create_response("1", {"status": "OK"}),
                 "v_hello", {"data": "TEST"}, "1"))
        self.loop.run_until_complete(
            call(create_response(True, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, True))
        self.loop.run_until_complete(
            call(create_response(False, {"status": "ok"}),
                 "v_hello", {"data": "ok"}, False))

        self.loop.run_until_complete(call(NOT_FOUND, "not_found"))
        self.loop.run_until_complete(call(INVALID_PARAMS, "v_hello"))
        self.loop.run_until_complete(call(INTERNAL_ERROR, "err_exc"))
        self.loop.run_until_complete(call(INTERNAL_ERROR, "err_exc2"))
        self.loop.run_until_complete(call(CUSTOM_ERROR_GT, "err_gt"))
        self.loop.run_until_complete(call(CUSTOM_ERROR_LT, "err_lt"))
Example #30
 def _exec_get_edges(self):
   return utils.create_response(core.overseer_topology.graph.edges())
Example #31
 def _exec_get_edges(self):
     return utils.create_response(core.overseer_topology.graph.edges())
Example #32
 def create_response(self, data, url):
     return utils.create_response(data, self.secret, url)
Example #33
 def _exec_ping(self):
     return utils.create_response("pong")
Example #34
 def _exec_get_nodes(self):
     nodes = core.overseer_topology.graph.nodes()
     nodes = ["%x" % node for node in nodes]
     return utils.create_response(nodes)
Example #35
 def _exec_get_options(self):
   return utils.create_response(ApiHandler.OPTIONS.keys())
Example #36
 def _exec_echo(self, message="Goodbye, sad world!"):
     return utils.create_response(message)
Example #37
 def _exec_echo(self, message="Goodbye, sad world!"):
   return utils.create_response(message)
Example #38
 def _exec_ping(self):
   return utils.create_response("pong")
Example #39
 def _exec_get_nodes(self):
   nodes = core.overseer_topology.graph.nodes()
   nodes = ["%x" % node for node in nodes]
   return utils.create_response(nodes)
Example #40
 def _exec_get_options(self):
     return utils.create_response(ApiHandler.OPTIONS.keys())
Example #41
	def create_response(self, data, url):
		return utils.create_response(data, self.secret, url)