def __init__(self):
    self.headers = {}
    self.objects = None
    self.count = 0
    self.tick = time.time()
    self.maxEventsInBuffer = 100
    self.stream_types = {}
    self.is_first_for_type = {
        "met": True,
        "eeg": True,
        "fac": True,
        "mot": True,
    }
    self.c = Cortex(user, debug_mode=False)
    self.c.do_prepare_steps()
    self.time_slice = 0
    self.time_data = {
        'engagement': {},
        'excitement': {},
        'stress': {},
        'relaxation': {},
        'interest': {},
        'focus': {}
    }
    self.time_slice_averages = {}
Example #2
class Record():
    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def create_record_then_export(self, record_name, record_description,
                                  record_length_s, record_export_folder,
                                  record_export_data_types,
                                  record_export_format, record_export_version):

        self.c.create_record(record_name, record_description)

        self.wait(record_length_s)

        self.c.stop_record()

        self.c.disconnect_headset()

        self.c.export_record(record_export_folder, record_export_data_types,
                             record_export_format, record_export_version,
                             [self.c.record_id])

    def wait(self, record_length_s):
        print('start recording -------------------------')
        length = 0
        while length < record_length_s:
            print('recording at {0} s'.format(length))
            time.sleep(1)
            length += 1
        print('end recording -------------------------')
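A minimal usage sketch for the Record class above; the record name, length, and export settings are placeholders (the data types, format, and version values mirror those used in the Marker example later in this listing):

r = Record()
r.create_record_then_export(
    record_name='demo record',                   # placeholder
    record_description='demo description',       # placeholder
    record_length_s=10,
    record_export_folder='./export',             # assumed local folder
    record_export_data_types=['EEG', 'MOTION'],  # as in the Marker example
    record_export_format='CSV',
    record_export_version='V2')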
Example #3
def func(self):
    url = "wss://localhost:6868"
    user = {
        "license": "",
        "client_id": "",
        "client_secret": "",
        "debit": 100,
        "number_row_data": 10
    }
    self.count = 0
    self.cortex = Cortex(url, user)
    self.headset_id = self.cortex.query_headset()
    self.cortex.connect_headset()
    self.cortex.request_access()
    auth = self.cortex.authorize()
    self.cortex.create_session(auth, self.headset_id)
    status = 'load'
    profile_name = 'skshreyas'
    self.cortex.setup_profile(profile_name, status)
    stream_list = ['com']
    self.lis = []
    self.cortex.subscribe(stream_list)
    while True:
        resp = self.cortex.ws.recv()
        res = json.loads(resp)
        mentalCommandList = res['com']
        action = mentalCommandList[0]
        # print(action)
        self.FocusChanger(action)
class Subcribe():
    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def sub(self, streams):
        self.c.sub_request(streams)
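A short usage sketch for Subcribe above, assuming the same global `user` credentials dict its constructor relies on:

s = Subcribe()
s.sub(['met', 'eeg'])  # Cortex stream names, e.g. performance metrics and raw EEG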
Example #5
    def __init__(self, logging_groups=['Default', 'Cortex']):
        Cortex.__init__(self, 'SOUL', logging_groups)

        # The following is parsing the defines.txt to get the app specific data
        defines = utils.parse_defines(
            utils.read_txt("../database/cortex/soul.txt"))
        self.delay = eval(defines['delay'])
Example #6
    def __init__(self, screen, logging_groups=['Default', 'Cortex']):
        Cortex.__init__(self, 'Collector', logging_groups)

        # Current collection platform
        self.platform = GroupMe_Web(screen, logging_groups)

        # The following is parsing the defines.txt to get the app specific data
        defines = utils.parse_defines(
            utils.read_txt("../database/cortex/collector.txt"))
        self.alias_dict = eval(defines['member_alias'])
class LiveAdvance():
    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def live(self, profile_name):
        print('begin live mode ----------------------------------')
        self.c.setup_profile(profile_name=profile_name, status='load')
        self.c.sub_request(stream=['com'])

    def get_sensitivity(self, profile_name):
        self.c.get_mental_command_action_sensitivity(profile_name)

    def set_sensitivity(self, profile_name, values):
        self.c.set_mental_command_action_sensitivity(profile_name, values)
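A hypothetical driver for LiveAdvance; the profile name is a placeholder, and the sensitivity list is only an assumed shape (Cortex expects one integer per trained action):

la = LiveAdvance()
la.get_sensitivity('my_profile')                # placeholder profile name
la.set_sensitivity('my_profile', [7, 7, 5, 5])  # assumed: one value per action
la.live('my_profile')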
def start_chart(data):
    # print('received message: ' + str(data), flush=sys.stdout)
    chart_cortex = Cortex(user)
    PROFILE_NAME = "RealBCI"
    STREAM = "pow"
    chart_cortex.do_prepare_steps()
    chart_cortex.setup_profile(PROFILE_NAME, 'load')
    chart_cortex.sub_request(stream=[STREAM], emit=emit)
def run(req: dict):
    payload = req["payload"]
    instances = payload["instances"]
    exp_name = payload["exp_name"]
    run_id = None
    if "run_id" in payload:
        run_id = payload["run_id"]

    # load the model and cache it in the model context
    client = Cortex.client(api_endpoint=req["apiEndpoint"], project=req["projectId"], token=req["token"])
    
    model_ctx[exp_name] = init_model(exp_name, run_id, client, req["projectId"])

    # retrieve model from the context
    model_obj = model_ctx[exp_name]

    # using encoder from model object
    encoder = model_obj["encoder"]

    instances = np.array(instances, dtype=object)
    instances = instances if instances.ndim == 2 else np.reshape(instances, (1, -1))

    instances = encoder(instances)

    # predict
    predictions = model_obj["model"].predict(instances)
    scores = model_obj["model"].predict_proba(instances)
    labels = model_obj["model"].classes_
    return {
        "payload": {
            "predictions": predictions.tolist(),
            "scores": scores.tolist(),
            "labels": labels.tolist()
        }
    }
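The skill above reads everything from its activation request; a sketch of that envelope with placeholder endpoint, credentials, and feature values:

req = {
    "apiEndpoint": "https://api.example.com",  # placeholder
    "projectId": "my-project",                 # placeholder
    "token": "<jwt>",                          # placeholder
    "payload": {
        "exp_name": "my-experiment",           # placeholder experiment name
        "run_id": "run-01",                    # optional; omit to let init_model pick a run
        "instances": [[0, 1, 2]],              # placeholder feature row
    },
}
response = run(req)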
Example #10
def main():
    with open('cred.json') as json_file:
        user = json.load(json_file)
    cortex = Cortex(user, True)
    # do_stuff(cortex)
    record = Record(cortex)
    record.run()
def run(request_body: dict):
    # Get agent/skill activation request body
    api_endpoint = request_body["apiEndpoint"]
    project = request_body["projectId"]
    token = request_body["token"]
    connection_name = request_body["payload"]["connection_name"]
    query = request_body["payload"]["query"]

    try:
        # Create Cortex client
        client = Cortex.client(api_endpoint=api_endpoint, project=project, token=token)

        # Get connection and create mongo client
        connection = client.get_connection(connection_name)
        params = dict(map(lambda l: (l['name'], l['value']), connection['params']))
        mongo = MongoClient(params["uri"])

        # Use connection
        database = params.get("database")
        collection = params.get("collection")

        if database and collection:
            result = list(mongo[database][collection].find(query))
        else:
            result = {"error": "collection, database and query must be provided"}
    except Exception as e:
        result = {"error": str(e)}
        logger.exception(e)

    # Return result
    return {'payload': result}
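A sample activation body for the connection skill above; all identifiers are placeholders and `query` is an ordinary MongoDB filter document:

request_body = {
    "apiEndpoint": "https://api.example.com",      # placeholder
    "projectId": "my-project",                     # placeholder
    "token": "<jwt>",                              # placeholder
    "payload": {
        "connection_name": "my-mongo-connection",  # placeholder connection
        "query": {"status": "active"},             # placeholder filter
    },
}
print(run(request_body)["payload"])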
Example #12
def domain_controller(params: dict):
    # initialise the Cortex client
    api_endpoint = params.get('apiEndpoint')
    token = params.get('token')
    project_id = params.get('projectId')
    client = Cortex.client(api_endpoint=api_endpoint,
                           token=token,
                           project=project_id)

    # # get secrets keys
    # os.environ["AWS_SECRET_ACCESS_KEY"] = str(client.get_secret("awssecretkey"))
    # os.environ["AWS_ACCESS_KEY_ID"] = str(client.get_secret("awspublickey"))

    # just in case there are old environment variables for hadron
    for key in os.environ.keys():
        if key.startswith('HADRON'):
            del os.environ[key]

    # extract the payload
    payload = params.get('payload', {})

    # get the domain contract repo from the payload
    uri_pm_repo = payload.get('domain_contract_repo')
    if not isinstance(uri_pm_repo, str):
        raise KeyError(
            "The message parameters passed do not have the mandatory 'domain_contract_repo' payload key"
        )

    # extract any extra kwargs
    hadron_kwargs = payload.get('hadron_kwargs', {})
    # export and pop any environment variable from the kwargs
    for key in hadron_kwargs.copy().keys():
        if str(key).isupper():
            os.environ[key] = hadron_kwargs.pop(key)
    # pop the run_controller attributes from the kwargs
    run_book = hadron_kwargs.pop('runbook', None)
    mod_tasks = hadron_kwargs.pop('mod_tasks', None)
    repeat = hadron_kwargs.pop('repeat', None)
    sleep = hadron_kwargs.pop('sleep', None)
    run_time = hadron_kwargs.pop('run_time', None)
    run_cycle_report = hadron_kwargs.pop('run_cycle_report', None)
    source_check_uri = hadron_kwargs.pop('source_check_uri', None)

    # instantiate the Controller passing any remaining kwargs
    controller = Controller.from_env(uri_pm_repo=uri_pm_repo,
                                     default_save=False,
                                     has_contract=True,
                                     **hadron_kwargs)
    # run the controller nano services.
    controller.run_controller(run_book=run_book,
                              mod_tasks=mod_tasks,
                              repeat=repeat,
                              sleep=sleep,
                              run_time=run_time,
                              source_check_uri=source_check_uri,
                              run_cycle_report=run_cycle_report)
Example #13
class TrainAdvance():
    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def get_active_action(self, profile_name):
        self.c.get_mental_command_active_action(profile_name)

    def get_command_brain_map(self, profile_name):
        self.c.get_mental_command_brain_map(profile_name)

    def get_training_threshold(self, profile_name):
        self.c.get_mental_command_training_threshold(profile_name)
Example #14
def delete(req: dict):
    payload = req['payload']
    client = Cortex.client(api_endpoint=req["apiEndpoint"],
                           project=req["projectId"],
                           token=req["token"])

    session_id = None
    if "session_id" in payload:
        session_id = payload["session_id"]
    else:
        return {'payload': "session_id is required"}
    result = SessionClient(client).delete_session(session_id, req["projectId"])
    return {"payload": result}
Example #15
def start(req: dict):
    payload = req['payload']
    client = Cortex.client(api_endpoint=req["apiEndpoint"],
                           project=req["projectId"],
                           token=req["token"])
    ttl = None
    description = "No description given"
    if "ttl" in payload:
        ttl = payload["ttl"]
    if "description" in payload:
        description = payload["description"]
    session_client = SessionClient(client)
    session = session_client.start_session(ttl, description, req["projectId"])
    return {'payload': {"session_id": session}}
def record():
    try:
        app.app_cortex = Cortex(user)
        app.app_cortex.do_prepare_steps()
        app.app_cortex.create_record(record_name, record_description)
        response = app.response_class(response=json.dumps(
            {"record_id": app.app_cortex.record_id}),
                                      status=200,
                                      mimetype='application/json')
        return response
    except Exception as e:
        print(e, flush=sys.stdout)
        response = app.response_class(response=json.dumps(
            {"error": 'Get Error'}),
                                      status=400,
                                      mimetype='application/json')
        return response
Example #17
def run(request_body: dict):
    # Get agent/skill activation request body
    api_endpoint = request_body["apiEndpoint"]
    project = request_body["projectId"]
    token = request_body["token"]
    experiment_name = request_body["payload"]["experiment_name"]
    instance = request_body["payload"]["instance"]

    # Create Cortex client and get experiment
    client = Cortex.client(api_endpoint=api_endpoint, project=project, token=token)
    experiment = client.experiment(experiment_name)

    # Get model from last experiment run
    exp_run = experiment.last_run()
    model = exp_run.get_artifact('model')

    # Return model predict
    return {'payload': model.predict(instance).tolist()}
Example #18
def put(req: dict):
    payload = req['payload']
    client = Cortex.client(api_endpoint=req["apiEndpoint"],
                           project=req["projectId"],
                           token=req["token"])

    session_id = None
    if "session_id" in payload:
        session_id = payload["session_id"]
    else:
        return {'payload': "session_id is required"}
    data = {}
    if "data" in payload:
        data = payload["data"]
    else:
        return {'payload': "data is required"}
    result = SessionClient(client).put_session_data(session_id, data,
                                                    req["projectId"])
    return {"payload": result}
def process(params):
    # create a Cortex client instance from the job's parameters
    client = Cortex.client(api_endpoint=params['apiEndpoint'], token=params['token'])
    # get the agent payload
    payload = params.get('payload',{})
    # You can print logs to the console; these are collected by docker/k8s
    print(f'Got payload: {payload}')
    # use the `client` instance to use Cortex client libraries
    content_client = ManagedContentClient(client)
    if 'activationId' in params:
        file_name = f'jobchain-data-{params["activationId"]}'
    else:
        # fall back to a timestamp-based file name
        file_name = f'jobchain-data-{int(time.time())}'
    # Read `recordCount` from the payload; supplying a default (or raising an exception when the key is missing) is recommended.
    record_count = payload.get('recordCount', 1000)
    # This is streaming the records to Cortex's managed content
    content_client.upload_streaming(file_name, datagen_stream(record_count), 'application/x-jsonlines')
    print(f'Wrote datafile to managed content key: {file_name}')
Example #20
def get(req: dict):
    payload = req['payload']
    client = Cortex.client(api_endpoint=req["apiEndpoint"],
                           project=req["projectId"],
                           token=req["token"])

    session_id = None
    if "session_id" in payload:
        session_id = payload["session_id"]
    else:
        return {'payload': "session_id is required"}
    key = None
    if "key" in payload:
        key = payload["key"]
        if len(key) < 1:
            key = None
    session_client = SessionClient(client)
    session = session_client.get_session_data(session_id, key,
                                              req["projectId"])
    return {'payload': session}
Example #21
def process(params):
    # create a Cortex client instance from the job's parameters
    client = Cortex.client(api_endpoint=params['apiEndpoint'],
                           token=params['token'])
    # get the agent payload
    payload = params['payload']
    # You can print logs to the console; these are collected by docker/k8s
    print(f'Got payload: {payload}')
    if 'activationId' in params:
        content_key = f'jobchain-data-{params["activationId"]}'
    else:
        if 'datafileKey' not in payload:
            raise Exception("'datafileKey' is required in the payload")
        content_key = payload['datafileKey']
    print(f'Fetching datafile from managed content: {content_key}')
    # use the `client` instance to use Cortex client libraries
    content_client = ManagedContentClient(client)
    # Download the datafile from Cortex's managed content
    # (written by the upstream job when this runs as part of an agent)
    content = content_client.download(content_key)
    df = pd.read_json(content, lines=True)
    counts = df['color'].value_counts()
    print(f'{counts.to_json()}')
def load_model(api_endpoint: str, token: str, project_id: str,
               experiment_name: str, run_id: str, artifact_key: str):
    global model

    if not experiment_name:
        raise ValueError(
            "experiment-name is required if a model is not initialized")

    # Initialize Cortex Client
    client = Cortex.client(api_endpoint=api_endpoint,
                           token=token,
                           project=project_id)

    # Load Model from the experiment run
    logging.info("Loading model artifacts from experiment run...")
    try:
        experiment = client.experiment(experiment_name)
        run = experiment.get_run(run_id) if run_id else experiment.last_run()
        model = run.get_artifact(artifact_key)
    except Exception as e:
        logging.error("Error: Failed to load model: {}".format(e))
        raise

    logging.info("Model Loaded!")
Example #23
def test():
    cortex = Cortex(None)
    cortex.do_prepare_steps()
def train(params):
    project = params['projectId']
    # create a Cortex client instance from the job's parameters
    client = Cortex.client(api_endpoint=params['apiEndpoint'], project=project, token=params['token'])

    payload = params['payload']
    # Read connection
    connection_name = payload['connection_name']
    print(f'Reading connection {connection_name}')
    connection = client.get_connection(connection_name)

    # Download training data using connection
    download_training_data(connection)
    print(f'Downloaded training data for {connection_name}')

    random.seed(0)
    np.random.seed(0)

    # Load dataset
    data = pd.read_csv('german_credit_eval.csv')

    # Separate outcome
    y = data['outcome']
    x = data.drop('outcome', axis=1)

    # Bring in test and training data
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=0)

    # Create an encoder
    cat_columns = [
        'checkingstatus',
        'history',
        'purpose',
        'savings',
        'employ',
        'status',
        'others',
        'property',
        'age',
        'otherplans',
        'housing',
        'job',
        'telephone',
        'foreign'
    ]
    encoder = CatEncoder(cat_columns, x, normalize=True)
    encoded_x_train = encoder(x_train.values)
    encoded_x_test = encoder(x_test.values)

    # Train a decision tree model
    dtree = DecisionTreeClassifier(criterion='entropy', random_state=0)
    dtree.fit(encoded_x_train, y_train.values)
    dtree_acc = dtree.score(encoded_x_test, y_test.values)

    # Train a multi-layer perceptron model
    mlp = MLPClassifier(hidden_layer_sizes=(20, 20), max_iter=2000)
    mlp.fit(encoded_x_train, y_train.values)
    mlp_acc = mlp.score(encoded_x_test, y_test.values)

    # Train a support vector machine model
    SVM = svm.SVC(gamma='scale', probability=True)
    SVM.fit(encoded_x_train, y_train.values)
    svm_acc = SVM.score(encoded_x_test, y_test.values)

    # Train a logistic regression model
    logit = LogisticRegression(random_state=0, solver='lbfgs')
    logit.fit(encoded_x_train, y_train.values)
    logit_acc = logit.score(encoded_x_test, y_test.values)

    # Save model meta-data

    model_name = payload["model_name"]

    save_model(client, project, model_name, payload.get("model_title", ""), payload.get("model_description", ""),
               payload.get("model_source", ""), payload.get("model_type", ""), payload.get("model_status", ""), payload.get("model_tags", []))

    # Save models as pickle files and Save experiments
    pickle_model(dtree, encoder, 'Decision Tree', dtree_acc, 'Basic Decision Tree model', 'german_credit_dtree.pkl')
    pickle_model(logit, encoder, 'LOGIT', logit_acc, 'Basic LOGIT model', 'german_credit_logit.pkl')
    pickle_model(mlp, encoder, 'MLP', mlp_acc, 'Basic MLP model', 'german_credit_mlp.pkl')
    pickle_model(SVM, encoder, 'SVM', svm_acc, 'Basic SVM model', 'german_credit_svm.pkl')

    save_experiment(client, 'gc_dtree_exp', 'german_credit_dtree.pkl', 'DecisionTreeClassifier', model_name, project)
    save_experiment(client, 'gc_logit_exp', 'german_credit_logit.pkl', 'LogisticRegression', model_name, project)
    save_experiment(client, 'gc_mlp_exp', 'german_credit_mlp.pkl', 'MLPClassifier', model_name, project)
    save_experiment(client, 'gc_svm_exp', 'german_credit_svm.pkl', 'SVM', model_name, project)
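A sketch of the job parameters train expects, based only on the keys it reads above; the connection and model names are placeholders, and save_model, pickle_model, save_experiment, and download_training_data are assumed to be defined elsewhere in the module:

params = {
    "apiEndpoint": "https://api.example.com",     # placeholder
    "projectId": "my-project",                    # placeholder
    "token": "<jwt>",                             # placeholder
    "payload": {
        "connection_name": "german-credit-data",  # placeholder connection
        "model_name": "german-credit",            # placeholder model name
        "model_title": "German Credit Models",    # optional metadata
    },
}
train(params)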
def __init__(self):
    self.c = Cortex(user, debug_mode=True)
    self.c.do_prepare_steps()
Example #26
class Marker():
    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def add_markers(self, marker_numbers):
        for m in range(marker_numbers):
            marker_time = time.time() * 1000
            print('add marker at : ', marker_time)

            marker = {
                "label": str(m),
                "value": "test_marker",
                "port": "python-app",
                "time": marker_time
            }

            self.c.inject_marker_request(marker)

            # add a marker every 3 seconds
            time.sleep(3)

    def demo_add_marker(self, record_export_folder, marker_numbers):
        # create record
        record_name = 'demo marker'
        record_description = 'demo marker'
        self.c.create_record(record_name, record_description)

        self.add_markers(marker_numbers)

        self.c.stop_record()

        self.c.disconnect_headset()

        # export record
        record_export_data_types = ['EEG', 'MOTION', 'PM', 'BP']
        record_export_format = 'CSV'
        record_export_version = 'V2'
        self.c.export_record(record_export_folder, record_export_data_types,
                             record_export_format, record_export_version,
                             [self.c.record_id])
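A minimal driver for the Marker demo above; the export folder and marker count are placeholders:

m = Marker()
m.demo_add_marker(record_export_folder='./export',  # placeholder folder
                  marker_numbers=5)                 # placeholder count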
    # Save model
    pickle.dump(clf, open(local_pickle_file, "wb"))


# The starting point for the job
if __name__ == '__main__':
    # Get agent/skill activation request body
    request_body = json.loads(sys.argv[1])
    api_endpoint = request_body["apiEndpoint"]
    project = request_body["projectId"]
    token = request_body["token"]
    experiment_name = request_body["payload"]["experiment_name"]

    train_and_save_model()

    # Create Cortex client and create experiment
    client = Cortex.client(api_endpoint=api_endpoint,
                           project=project,
                           token=token)
    experiment = client.experiment(experiment_name)

    # Upload model to experiment run in Cortex
    model = open(local_pickle_file, "rb")
    run = experiment.start_run()
    run.log_artifact_stream("model", model)
    run.set_meta("algo", "RandomForestClassifier Model")

    print(
        f'Created experiment "{experiment_name}". Started Run {run.id}. Logged RandomForestClassifier model.'
    )
Example #28
from pyboy import PyBoy
import ctypes
from time import sleep
import os
from command_handler import command_handler
from cca_handler import cca_handler
from pyboy_controller import pyboy_controller
import time


# Initialize our variables
use_csv = True
# TEMP: feel free to change how this is done; the logic is 'if not using CSV, use Emotiv' here and in #EACHFRAMEPOG - Matt
# NOTE: a CSV file will still be saved even when reading from a CSV file
if not use_csv:
    cortex = Cortex(None)
    cortex.do_prepare_steps()
    generator = cortex.sub_request(['eeg'])
    # generator = cortex.sub_request_pow(['pow'])
    next(generator).queue
    data_columns = ["P7", "O1", "O2", "P8", "TIME"]
    # data_columns = ["O1/theta","O1/alpha","O1/betaL","O1/betaH","O1/gamma",
    #                 "O2/theta","O2/alpha","O2/betaL","O2/betaH","O2/gamma", "TIME"]


# CSV CONTROL
if use_csv:
    recording_data = pd.read_csv('old_bad_recordings/first_target_0.csv')
    record_length = len(recording_data.index)
    channels = ['P7', 'O1', 'O2', 'P8'] # only data channels
    row_index = 0
Example #29
class Train():
    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def train(self, profile_name, training_action, number_of_train):

        stream = ['sys']
        self.c.sub_request(stream)

        profiles = self.c.query_profile()

        if profile_name not in profiles:
            status = 'create'
            self.c.setup_profile(profile_name, status)

        status = 'load'
        self.c.setup_profile(profile_name, status)

        print('begin train -----------------------------------')
        num_train = 0
        while num_train < number_of_train:
            num_train = num_train + 1

            print('start training {0} time {1} ---------------'.format(
                training_action, num_train))
            print('\n')
            status = 'start'
            self.c.train_request(detection='mentalCommand',
                                 action=training_action,
                                 status=status)

            print('accept {0} time {1} ---------------'.format(
                training_action, num_train))
            print('\n')
            status = 'accept'
            self.c.train_request(detection='mentalCommand',
                                 action=training_action,
                                 status=status)

        print('save trained action')
        status = "save"
        self.c.setup_profile(profile_name, status)

        status = 'unload'
        self.c.setup_profile(profile_name, status)

    def live(self, profile_name):
        print('begin live mode ----------------------------------')
        # load profile
        status = 'load'
        self.c.setup_profile(profile_name, status)

        # sub 'com' stream and view live mode
        stream = ['com']
        self.c.sub_request(stream)
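A hypothetical training session using the Train class above; the profile name is a placeholder and training_action must be a mental-command action Cortex recognizes:

t = Train()
t.train(profile_name='my_profile',  # placeholder profile
        training_action='push',     # assumed action name
        number_of_train=5)
t.live('my_profile')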
class Subcribe():
    def __init__(self):
        self.headers = {}
        self.objects = None
        self.count = 0
        self.tick = time.time()
        self.maxEventsInBuffer = 100
        self.stream_types = {}
        self.is_first_for_type = {
            "met": True,
            "eeg": True,
            "fac": True,
            "mot": True,
        }
        self.c = Cortex(user, debug_mode=False)
        self.c.do_prepare_steps()
        self.time_slice = 0
        self.time_data = {
            'engagement': {},
            'excitement': {},
            'stress': {},
            'relaxation': {},
            'interest': {},
            'focus': {}
        }
        self.time_slice_averages = {}

    def add_cognitive_data_to_time_aggregation(self, event):
        right_now = time.time()
        current_time = str(right_now * 1000)
        td = self.time_data
        for key in td.keys():
            new_value = event.get(key, None)
            if new_value:
                td[key][current_time] = new_value

        self.time_slice_averages = {}
        for key in td.keys():
            ds = td[key]
            keepers = {}
            for t in ds.keys():
                last_time = float(t) / 1000
                if (right_now - last_time) <= 30:
                    keepers[t] = ds[t]
            td[key] = keepers
            vals = list(keepers.values())
            if vals:  # avoid division by zero when no recent samples remain
                self.time_slice_averages[key] = sum(vals) / len(vals)
        event["last_30s"] = self.time_slice_averages

    def prepare_metadata(self):
        metadata = self.c.session_context["result"]
        del metadata["recordIds"]
        del metadata["recording"]
        del metadata["id"]
        headset = metadata["headset"]
        del headset["motionSensors"]
        del headset["sensors"]
        del metadata["license"]
        del metadata["performanceMetrics"]
        del metadata["stopped"]
        del metadata["streams"]
        if self.c.user_id is None:
            self.c.user_id = metadata["owner"]
        self.c.device_id = headset["id"]
        self.c.component_id = (headset["virtualHeadsetId"]
                               or "00000000-0000-0000-0000-000000000000")
        del metadata["owner"]
        del headset["id"]
        del headset["virtualHeadsetId"]

    def create_records_structure(self):
        o = {
            "id": str(uuid.uuid4()),
            "sessionId": self.c.session_id,
            "userId": self.c.user_id,
            "deviceId": self.c.device_id.lower(),
            "componentId": self.c.component_id,
            "ts": datetime.datetime.now().isoformat(),
            "metadata": {},
            "eeg": [],
            "cognitive": [],
            "facial": [],
            "motion": [],
        }
        metadata = self.c.session_context["result"]
        o["metadata"] = metadata
        return o

    def publish_records(self):
        my_keys = [
            'engagement', 'excitement', 'stress', 'relaxation', 'interest',
            'focus'
        ]
        if len(self.objects['cognitive']) > 0:
            event_data = self.objects
            rows = self.objects['cognitive']
            for x in range(len(rows)):
                row = rows[x]
                vals = []
                for y in range(len(my_keys)):
                    k = my_keys[y]
                    vals.append(str(row[k]))
                print(','.join(vals))
            # print(json.dumps(event_data, indent=4))
            send_kinesis(kinesis_client, kinesis_stream_name,
                         kinesis_shard_count, event_data)  # send it!

        self.objects = self.create_records_structure()
        self.count = 0
        self.tick = time.time()

    def has_all_keys(self, record, keys):
        answer = True
        for k in keys:
            answer = answer and (k in record)
        return answer

    def add_event(self, event, stream_name, sid):
        if stream_name == 'met':
            self.objects['cognitive'].append(event)
        elif stream_name == 'eeg':
            self.objects['eeg'].append(event)
        elif stream_name == 'fac':
            self.objects['facial'].append(event)
        elif stream_name == 'mot':
            self.objects['motion'].append(event)

    def map_met(self, event):
        if not event['engagementEnabled']:
            del event['engagement']
        if not event['excitementEnabled']:
            del event['excitement']
            del event['excitementLast1Min']
        if not event['stressEnabled']:
            del event['stress']
        if not event['relaxationEnabled']:
            del event['relaxation']
        if not event['interestEnabled']:
            del event['interest']
        if not event['focusEnabled']:
            del event['focus']
        del event['engagementEnabled']
        del event['excitementEnabled']
        del event['stressEnabled']
        del event['relaxationEnabled']
        del event['interestEnabled']
        del event['focusEnabled']
        self.add_cognitive_data_to_time_aggregation(event)

    def map_eeg(self, event):
        if not event['hasMarkers']:
            del event['markers']
            del event['hasMarkers']
        else:
            event['hasMarkers'] = True

        if not event['wasInterpolated']:
            del event['wasInterpolated']
        else:
            event['wasInterpolated'] = True

    def map_mot(self, e):
        if not e['wasInterpolated']:
            del e['wasInterpolated']
        else:
            e['wasInterpolated'] = True

    def is_facial_data_redundant(self, event):
        recent_facial_records = self.objects['facial']
        if len(recent_facial_records) <= 0:
            return False
        last_facial_record = recent_facial_records[-1]
        compare_fields = [
            "eyes", "upperFace", "upperFacePower", "lowerFace",
            "lowerFacePower"
        ]
        is_identical = True
        for field in compare_fields:
            if not (event[field] == last_facial_record[field]):
                is_identical = False
        return is_identical

    def is_data_sample_relevant(self, event):
        n = event['n']
        # Keep every 5th sample using the modulo operator
        return (n % 5) == 0

    def map_event(self, record, stream_name):
        event = {}
        sid = record['sid']
        time_value = record['time']
        headers = self.headers[stream_name]
        if stream_name in record:
            metrics = record[stream_name]
            for i in range(len(headers)):
                key = headers[i]
                mapped_key = field_name_mapping.get(key) or key
                val = metrics[i]
                event[mapped_key] = val
            event['ts'] = time_value

            if stream_name == 'met':
                self.map_met(event)
            elif stream_name == 'eeg':
                if self.is_data_sample_relevant(event):
                    self.map_eeg(event)
                else:
                    event = None
            elif stream_name == 'mot':
                if self.is_data_sample_relevant(event):
                    self.map_mot(event)
                else:
                    event = None
            elif stream_name == 'fac':
                if self.is_facial_data_redundant(event):
                    event = None
        return event, sid

    def get_record_type(self, record):
        record_type = None
        if 'eeg' in record: record_type = 'eeg'
        elif 'mot' in record: record_type = 'mot'
        elif 'fac' in record: record_type = 'fac'
        elif 'met' in record: record_type = 'met'
        return record_type

    def process_headers(self, record):
        header_data = record and record['result'] and record['result']['success']
        for i in range(len(header_data)):
            header = header_data[i]
            cols = header['cols']
            stream_name = header['streamName']
            self.headers[stream_name] = cols

    def on_data_received(self, data):
        try:
            record = json.loads(data)
        except Exception as e:
            print(e)
            return
        sid = None
        stream_name = None

        # data records carry a 'sid' field and a stream key at the top level
        record_type = self.get_record_type(record)
        if record_type:
            self.count = self.count + 1
            event, sid = self.map_event(record, record_type)
            if event:
                self.add_event(event, record_type, sid)
                current_time = time.time()
                # if (self.count >= self.maxEventsInBuffer) or ((current_time - self.tick) >= 1):
                if (current_time - self.tick) >= 1:
                    self.publish_records()
        else:
            # Otherwise this is a special header record with all of the columns defined
            self.process_headers(record)

    def start(self, user_id, streams):
        if self.c.ready_to_use:
            self.c.user_id = user_id
            self.prepare_metadata()
            self.c.add_callback(self.on_data_received)
            self.count = 0
            self.objects = self.create_records_structure()
            print(','.join([
                'engagement', 'excitement', 'stress', 'relaxation', 'interest',
                'focus'
            ]))
            self.c.sub_request(streams)
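A usage sketch for this Subcribe pipeline, assuming the global user credentials, the kinesis_* globals, and field_name_mapping are configured as the code above expects:

s = Subcribe()
s.start(user_id='user-123',                    # placeholder user id
        streams=['met', 'eeg', 'fac', 'mot'])  # the four streams the class maps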