class Subcribe():
    """Wraps a Cortex connection and exposes data-stream subscription."""

    def __init__(self):
        # Open a debug-enabled Cortex session and run its preparation steps.
        cortex = Cortex(user, debug_mode=True)
        cortex.do_prepare_steps()
        self.c = cortex

    def sub(self, streams):
        """Subscribe to the given list of Cortex data streams."""
        self.c.sub_request(streams)
# Ejemplo n.º 2 (example-listing separator; score: 0)
class Record():
    """Creates a timed Cortex recording and exports it when finished."""

    def __init__(self):
        # Debug-enabled Cortex connection, prepared up front.
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def create_record_then_export(self, record_name, record_description,
                                  record_length_s, record_export_folder,
                                  record_export_data_types,
                                  record_export_format, record_export_version):
        """Record for record_length_s seconds, then export the result."""
        cortex = self.c
        cortex.create_record(record_name, record_description)

        self.wait(record_length_s)

        cortex.stop_record()
        cortex.disconnect_headset()

        cortex.export_record(record_export_folder, record_export_data_types,
                             record_export_format, record_export_version,
                             [cortex.record_id])

    def wait(self, record_length_s):
        """Block for record_length_s seconds, logging progress each second."""
        print('start recording -------------------------')
        for elapsed in range(record_length_s):
            print('recording at {0} s'.format(elapsed))
            time.sleep(1)
        print('end recording -------------------------')
def start_chart(data):
    """Prepare Cortex, load the RealBCI profile, and stream band power."""
    # print('received message: ' + str(data), flush=sys.stdout)
    profile_name = "RealBCI"
    stream_name = "pow"
    cortex = Cortex(user)
    cortex.do_prepare_steps()
    cortex.setup_profile(profile_name, 'load')
    cortex.sub_request(stream=[stream_name], emit=emit)
# Ejemplo n.º 4 (example-listing separator; score: 0)
class Train():
    """Drives mental-command training sessions against a Cortex profile."""

    def __init__(self):
        # Debug-enabled Cortex connection, prepared up front.
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def train(self, profile_name, training_action, number_of_train):
        """Train `training_action` `number_of_train` times on the profile.

        Creates the profile when Cortex does not know it yet, loads it,
        runs the start/accept training cycle, then saves and unloads it.
        """
        # The 'sys' stream carries training-progress events.
        self.c.sub_request(['sys'])

        if profile_name not in self.c.query_profile():
            self.c.setup_profile(profile_name, 'create')

        self.c.setup_profile(profile_name, 'load')

        print('begin train -----------------------------------')
        for num_train in range(1, number_of_train + 1):
            print('start training {0} time {1} ---------------'.format(
                training_action, num_train))
            print('\n')
            self.c.train_request(detection='mentalCommand',
                                 action=training_action,
                                 status='start')

            print('accept {0} time {1} ---------------'.format(
                training_action, num_train))
            print('\n')
            self.c.train_request(detection='mentalCommand',
                                 action=training_action,
                                 status='accept')

        print('save trained action')
        self.c.setup_profile(profile_name, "save")
        self.c.setup_profile(profile_name, 'unload')

    def live(self, profile_name):
        """Load the profile and subscribe to the live 'com' stream."""
        print('begin live mode ----------------------------------')
        # load profile
        self.c.setup_profile(profile_name, 'load')

        # sub 'com' stream and view live mode
        self.c.sub_request(['com'])
# Ejemplo n.º 5 (example-listing separator; score: 0)
class TrainAdvance():
    """Read-only queries against a profile's mental-command configuration."""

    def __init__(self):
        # Debug-enabled Cortex connection, prepared up front.
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def get_active_action(self, profile_name):
        """Query the active mental-command actions for the profile."""
        self.c.get_mental_command_active_action(profile_name)

    def get_command_brain_map(self, profile_name):
        """Query the mental-command brain map for the profile."""
        self.c.get_mental_command_brain_map(profile_name)

    def get_training_threshold(self, profile_name):
        """Query the mental-command training threshold for the profile.

        Bug fix: the original signature took no ``profile_name``, so the
        body's reference to it raised NameError on every call; the
        parameter now matches the sibling methods.
        """
        self.c.get_mental_command_training_threshold(profile_name)
class LiveAdvance():
    """Live mental-command streaming plus sensitivity get/set helpers."""

    def __init__(self):
        # Debug-enabled Cortex connection, prepared up front.
        cortex = Cortex(user, debug_mode=True)
        cortex.do_prepare_steps()
        self.c = cortex

    def live(self, profile_name):
        """Load the profile and watch the live 'com' stream."""
        print('begin live mode ----------------------------------')
        cortex = self.c
        cortex.setup_profile(profile_name=profile_name, status='load')
        cortex.sub_request(stream=['com'])

    def get_sensitivity(self, profile_name):
        """Fetch the mental-command action sensitivity for the profile."""
        self.c.get_mental_command_action_sensitivity(profile_name)

    def set_sensitivity(self, profile_name, values):
        """Apply new mental-command action sensitivity values."""
        self.c.set_mental_command_action_sensitivity(profile_name, values)
class Marker():
    """Injects sound-synchronized ERP markers into a Cortex recording."""

    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def add_markers(self, marker_numbers):
        """Play a tone and inject one labelled marker per iteration."""
        now = ptb.GetSecs()

        for index in range(marker_numbers):
            tone = sound.Sound('1000')
            marker_time = time.time() * 1000
            print('add marker at : ', marker_time)

            tone.play(when=now + 0.5)  # play in EXACTLY 0.5s
            self.c.inject_marker_request({
                "label": str(index),
                "value": "sound-erp",
                "port": "python-app",
                "time": marker_time,
            })

            # add marker each seconds
            time.sleep(1.5)

    def demo_add_marker(self, record_export_folder, marker_numbers):
        """Record, inject the requested markers, then export as CSV V2."""
        # create record
        self.c.create_record('demo marker', 'demo marker')

        self.add_markers(marker_numbers)

        self.c.stop_record()
        self.c.disconnect_headset()

        # export record
        self.c.export_record(record_export_folder,
                             ['EEG', 'MOTION', 'PM', 'BP'],
                             'CSV', 'V2', [self.c.record_id])
# Ejemplo n.º 8 (example-listing separator; score: 0)
class Marker():
    """Injects timed test markers into a Cortex recording and exports it."""

    def __init__(self):
        self.c = Cortex(user, debug_mode=True)
        self.c.do_prepare_steps()

    def add_markers(self, marker_numbers):
        """Inject marker_numbers markers, one every three seconds."""
        for index in range(marker_numbers):
            stamp = time.time() * 1000
            print('add marker at : ', stamp)

            self.c.inject_marker_request({
                "label": str(index),
                "value": "test_marker",
                "port": "python-app",
                "time": stamp,
            })

            # add marker each seconds
            time.sleep(3)

    def demo_add_marker(self, record_export_folder, marker_numbers):
        """Record, inject the requested markers, then export as CSV V2."""
        # create record
        self.c.create_record('Marker video', 'test')

        self.add_markers(marker_numbers)

        self.c.stop_record()
        self.c.disconnect_headset()

        # export record
        self.c.export_record(record_export_folder,
                             ['EEG', 'MOTION', 'PM', 'BP'],
                             'CSV', 'V2', [self.c.record_id])
class Subcribe():
    """Subscribes to Cortex streams, buffers events, and ships them to Kinesis.

    Incoming JSON records are normalized per stream type ('met', 'eeg',
    'fac', 'mot'), accumulated into a records envelope, and flushed to
    Kinesis roughly once per second.
    """

    def __init__(self):
        self.headers = {}        # stream name -> column list from the header record
        self.objects = None      # current in-flight records envelope
        self.count = 0           # events buffered since the last publish
        self.tick = time.time()  # timestamp of the last publish
        self.maxEventsInBuffer = 100
        self.stream_types = {}
        self.is_first_for_type = {
            "met": True,
            "eeg": True,
            "fac": True,
            "mot": True,
        }
        self.c = Cortex(user, debug_mode=False)
        self.c.do_prepare_steps()
        self.time_slice = 0
        # Rolling window of cognitive samples keyed by their ms timestamp.
        self.time_data = {
            'engagement': {},
            'excitement': {},
            'stress': {},
            'relaxation': {},
            'interest': {},
            'focus': {}
        }
        self.time_slice_averages = {}

    def add_cognitive_data_to_time_aggregation(self, event):
        """Fold the event's cognitive metrics into 30-second rolling averages.

        Stores each present (truthy) metric keyed by its ms timestamp,
        drops samples older than 30 seconds, and attaches the per-metric
        averages to the event under 'last_30s'.
        """
        right_now = time.time()
        current_time = str(right_now * 1000)
        td = self.time_data
        for key in td.keys():
            new_value = event.get(key, None)
            if new_value:
                td[key][current_time] = new_value

        self.time_slice_averages = {}
        for key in td.keys():
            keepers = {}
            for t, v in td[key].items():
                last_time = float(t) / 1000
                if (right_now - last_time) <= 30:
                    keepers[t] = v
            td[key] = keepers
            vals = keepers.values()
            # Bug fix: guard the division — the original raised
            # ZeroDivisionError whenever a metric had no samples in-window.
            if vals:
                self.time_slice_averages[key] = sum(vals) / len(vals)
        event["last_30s"] = self.time_slice_averages

    def prepare_metadata(self):
        """Strip session metadata down to what we publish and capture IDs."""
        metadata = self.c.session_context["result"]
        del metadata["recordIds"]
        del metadata["recording"]
        del metadata["id"]
        headset = metadata["headset"]
        del headset["motionSensors"]
        del headset["sensors"]
        del metadata["license"]
        del metadata["performanceMetrics"]
        del metadata["stopped"]
        del metadata["streams"]
        if self.c.user_id is None:
            self.c.user_id = metadata["owner"]
        self.c.device_id = headset["id"]
        # Fall back to the nil UUID when no virtual headset is assigned.
        self.c.component_id = headset[
            "virtualHeadsetId"] or "00000000-0000-0000-0000-000000000000"
        del metadata["owner"]
        del headset["id"]
        del headset["virtualHeadsetId"]

    def create_records_structure(self):
        """Return a fresh, empty publish envelope for buffered events."""
        o = {
            "id": str(uuid.uuid4()),
            "sessionId": self.c.session_id,
            "userId": self.c.user_id,
            "deviceId": self.c.device_id.lower(),
            "componentId": self.c.component_id,
            "ts": datetime.datetime.now().isoformat(),
            "metadata": {},
            "eeg": [],
            "cognitive": [],
            "facial": [],
            "motion": [],
        }
        metadata = self.c.session_context["result"]
        o["metadata"] = metadata
        return o

    def publish_records(self):
        """Send buffered events to Kinesis and reset the buffer."""
        my_keys = [
            'engagement', 'excitement', 'stress', 'relaxation', 'interest',
            'focus'
        ]
        if len(self.objects['cognitive']) > 0:
            event_data = self.objects
            # Log one CSV line of cognitive metrics per buffered row.
            for row in self.objects['cognitive']:
                print(','.join(str(row[k]) for k in my_keys))
            # print(json.dumps(event_data, indent=4))
            send_kinesis(kinesis_client, kinesis_stream_name,
                         kinesis_shard_count, event_data)  # send it!

        self.objects = self.create_records_structure()
        self.count = 0
        self.tick = time.time()

    def has_all_keys(self, record, keys):
        """Return True when every key in `keys` is present in `record`."""
        return all(k in record for k in keys)

    def add_event(self, event, stream_name, sid):
        """Append the event to the buffer list matching its stream type."""
        if stream_name == 'met':
            self.objects['cognitive'].append(event)
        elif stream_name == 'eeg':
            self.objects['eeg'].append(event)
        elif stream_name == 'fac':
            self.objects['facial'].append(event)
        elif stream_name == 'mot':
            self.objects['motion'].append(event)

    def map_met(self, event):
        """Drop disabled cognitive metrics, then update rolling averages."""
        if not event['engagementEnabled']:
            del event['engagement']
        if not event['excitementEnabled']:
            del event['excitement']
            del event['excitementLast1Min']
        if not event['stressEnabled']:
            del event['stress']
        if not event['relaxationEnabled']:
            del event['relaxation']
        if not event['interestEnabled']:
            del event['interest']
        if not event['focusEnabled']:
            del event['focus']
        # The *Enabled flags are control data only — never published.
        del event['engagementEnabled']
        del event['excitementEnabled']
        del event['stressEnabled']
        del event['relaxationEnabled']
        del event['interestEnabled']
        del event['focusEnabled']
        self.add_cognitive_data_to_time_aggregation(event)

    def map_eeg(self, event):
        """Keep marker/interpolation fields only when they carry information."""
        if not event['hasMarkers']:
            del event['markers']
            del event['hasMarkers']
        else:
            event['hasMarkers'] = True

        if not event['wasInterpolated']:
            del event['wasInterpolated']
        else:
            event['wasInterpolated'] = True

    def map_mot(self, e):
        """Keep wasInterpolated only when the sample really was interpolated."""
        if not e['wasInterpolated']:
            del e['wasInterpolated']
        else:
            e['wasInterpolated'] = True

    def is_facial_data_redundant(self, event):
        """Return True when the event repeats the last buffered facial record."""
        recent_facial_records = self.objects['facial']
        if len(recent_facial_records) <= 0:
            return False
        last_facial_record = recent_facial_records[-1]
        compare_fields = [
            "eyes", "upperFace", "upperFacePower", "lowerFace",
            "lowerFacePower"
        ]
        return all(event[field] == last_facial_record[field]
                   for field in compare_fields)

    def is_data_sample_relevant(self, event):
        """Down-sample high-rate streams: keep every 5th sample."""
        n = event['n']
        return (n % 5) == 0

    def map_event(self, record, stream_name):
        """Turn a raw stream record into a named-field event.

        Returns (event, sid); event is None when the sample is dropped
        (down-sampled eeg/mot, or redundant facial data).
        """
        event = {}
        sid = record['sid']
        time_value = record['time']
        headers = self.headers[stream_name]
        if stream_name in record:
            metrics = record[stream_name]
            for i, key in enumerate(headers):
                # Bug fix: use .get so unmapped columns fall back to their
                # raw name instead of raising KeyError.
                mapped_key = field_name_mapping.get(key) or key
                event[mapped_key] = metrics[i]
            event['ts'] = time_value

            if stream_name == 'met':
                self.map_met(event)
            elif stream_name == 'eeg':
                if self.is_data_sample_relevant(event):
                    self.map_eeg(event)
                else:
                    event = None
            elif stream_name == 'mot':
                if self.is_data_sample_relevant(event):
                    self.map_mot(event)
                else:
                    event = None
            elif stream_name == 'fac':
                if self.is_facial_data_redundant(event):
                    event = None
        return event, sid

    def get_record_type(self, record):
        """Return the stream type present in the record, or None."""
        record_type = None
        if 'eeg' in record: record_type = 'eeg'
        elif 'mot' in record: record_type = 'mot'
        elif 'fac' in record: record_type = 'fac'
        elif 'met' in record: record_type = 'met'
        return record_type

    def process_headers(self, record):
        """Remember the column names announced for each stream."""
        header_data = record and record['result'] and record['result'][
            'success']
        for header in header_data:
            self.headers[header['streamName']] = header['cols']

    def on_data_received(self, data):
        """Handle one raw JSON payload from Cortex: parse, map, buffer, flush."""
        try:
            record = json.loads(data)
        except ValueError as e:
            # Bug fix: the original read `except e as Error:`, which raised
            # NameError when triggered, and then used the unset `record`.
            print(e)
            return
        sid = None
        stream_name = None

        # if the data record has a 'sid' field at the top level, then it is a summary
        record_type = self.get_record_type(record)
        if record_type:
            self.count = self.count + 1
            event, sid = self.map_event(record, record_type)
            if event:
                self.add_event(event, record_type, sid)
                current_time = time.time()
                # Flush roughly once per second.
                # if (self.count >= self.maxEventsInBuffer) or ((current_time - self.tick) >= 1):
                if (current_time - self.tick) >= 1:
                    self.publish_records()
        else:
            # Otherwise this is a special header record with all of the columns defined
            self.process_headers(record)

    def start(self, user_id, streams):
        """Register the data callback and subscribe to the given streams."""
        if self.c.ready_to_use:
            self.c.user_id = user_id
            self.prepare_metadata()
            self.c.add_callback(self.on_data_received)
            self.count = 0
            self.objects = self.create_records_structure()
            print(','.join([
                'engagement', 'excitement', 'stress', 'relaxation', 'interest',
                'focus'
            ]))
            self.c.sub_request(streams)
# Ejemplo n.º 10 (example-listing separator; score: 0)
import ctypes
import os
import time
from time import sleep

import pandas as pd

from cca_handler import cca_handler
from command_handler import command_handler
from pyboy_controller import pyboy_controller


# Initialize our variables
# Toggle between replaying a pre-recorded CSV (True) and reading live
# EEG data from an Emotiv headset via Cortex (False).
use_csv = True
# TEMP: feel free to change how I do this. I say 'if not using csv then use emotiv' here and in #EACHFRAMEPOG - Matt
# NOTE: still will save a csv file even though I am reading from a csv file
if not use_csv:
    # NOTE(review): `Cortex` is not defined in this chunk's imports —
    # confirm it is imported elsewhere in the file.
    cortex = Cortex(None)
    cortex.do_prepare_steps()
    generator = cortex.sub_request(['eeg'])
    # generator = cortex.sub_request_pow(['pow'])
    # Prime the subscription generator; presumably `.queue` forces the
    # first message to be consumed — TODO confirm against Cortex wrapper.
    next(generator).queue
    data_columns = ["P7", "O1", "O2", "P8", "TIME"]
    # data_columns = ["O1/theta","O1/alpha","O1/betaL","O1/betaH","O1/gamma",
    #                 "O2/theta","O2/alpha","O2/betaL","O2/betaH","O2/gamma", "TIME"]


# CSV CONTROL
if use_csv:
    # NOTE(review): `pd` (pandas) is not imported in this chunk's visible
    # import block — confirm `import pandas as pd` exists at file top.
    recording_data = pd.read_csv('old_bad_recordings/first_target_0.csv')
    record_length = len(recording_data.index)
    channels = ['P7', 'O1', 'O2', 'P8'] # only data channels
    row_index = 0
    num_seconds = 3 # changing this will affect the time taken for each command
# Ejemplo n.º 11 (example-listing separator; score: 0)
def test():
    cortex = Cortex(None)
    cortex.do_prepare_steps()