Example #1
class EEGFromRabbitMQ(EEGSource):
    def __init__(self, host, port, user, password, virtualhost):
        super(EEGFromRabbitMQ, self).__init__()
        print('[ ] EEGFromRabbitMQ Started')
        # connect to the message bus with the supplied credentials
        self.rabbit = RabbitController(host, port, user, password, virtualhost)
        self.latest_data = None
        self.listening_task = threading.Thread(name="eeg_data", 
                                               target=self.listen)
        self.listening_task.start()

    def listen(self):
        self.rabbit.subscribe_eegdata(self.rabbitcallback)


    def rabbitcallback(self, ch, method, properties, body):
        if body is None or body == '':
            return
        command = json.loads(body)
        self.latest_data = EEGData(command['Values'])
        # print("received latest EEG Data: %s" %(repr(command['Values'])))
    # return the most recently received sample
    def read_new_data(self):
        return self.latest_data
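A minimal usage sketch for the class above, assuming the class and its dependencies are in scope and a local RabbitMQ broker with the default guest credentials (as in the other examples on this page); the polling loop simply mirrors read_new_data.

import time

if __name__ == '__main__':
    # assumed local broker and default credentials
    source = EEGFromRabbitMQ('localhost', 5672, 'guest', 'guest', '/')
    try:
        while True:
            sample = source.read_new_data()   # latest EEGData, or None before the first message
            if sample is not None:
                print('[ ] latest EEG sample: %s' % repr(sample))
            time.sleep(0.5)
    except KeyboardInterrupt:
        pass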
Example #2
    def predict_next_level(self):
        rabbit = RabbitController('localhost', 5672, 'guest', 'guest', '/')
        while not self._stop.is_set():
            self._stop.wait(EMIT_STAGE_PERIOD_SECONDS)
            prior_state = self.state
            try:
                data = np.array(self.queue, dtype=np.float64)
                model = arima_model.ARIMA(data, order=ARIMA_PARAMS)
                model = model.fit(disp=0)
                forecast = model.predict(start=1, end=20)
            except ValueError:
                print(
                    "Skipping state evaluation due to insufficient amount of data collected."
                )
                return
            data_filtered = data[np.where(
                np.logical_and(np.greater_equal(data, np.percentile(data, 5)),
                               np.less_equal(data, np.percentile(data, 95))))]
            mean_diff = np.mean(forecast) - np.mean(data_filtered)
            # add more logic here, considering movement and blinks
            if mean_diff > UPPER_THRESHOLD:
                self.state = min(self.state + 1, 5)
                self.levels_advance_upwards = True  # auto advance up after upward transition
            elif mean_diff < LOWER_THRESHOLD:
                self.state = max(self.state - 1, 1)
                self.levels_advance_upwards = False  # auto advance down after downward transition

            # send to the bus
            if self.state != prior_state:
                print("[ ] EMITTING STATE: %s" % (self.state))
                rabbit.publish_state(self.state)
                self.state_last_published = datetime.datetime.now()

            with self.lock:
                self.blink_events = 0
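The forecast-versus-trimmed-mean decision above can be exercised in isolation. The sketch below uses synthetic data and the current statsmodels ARIMA API (statsmodels.tsa.arima.model; the example above uses the older statsmodels.tsa.arima_model module); ARIMA_PARAMS and the thresholds are assumed placeholder values, not the project's actual settings.

import numpy as np
from statsmodels.tsa.arima.model import ARIMA

ARIMA_PARAMS = (2, 1, 0)    # assumed order
UPPER_THRESHOLD = 5.0       # assumed
LOWER_THRESHOLD = -5.0      # assumed

data = np.random.default_rng(0).normal(50, 10, size=120)   # synthetic EEG-like values

forecast = ARIMA(data, order=ARIMA_PARAMS).fit().predict(start=1, end=20)

# drop the lowest/highest 5% before comparing means, as in predict_next_level
mask = (data >= np.percentile(data, 5)) & (data <= np.percentile(data, 95))
mean_diff = np.mean(forecast) - np.mean(data[mask])

if mean_diff > UPPER_THRESHOLD:
    print('would advance state up')
elif mean_diff < LOWER_THRESHOLD:
    print('would advance state down')
else:
    print('would hold state, mean_diff=%.2f' % mean_diff)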
Example #3
    def auto_advance_level(self):
        rabbit = RabbitController('localhost', 5672, 'guest', 'guest', '/')
        while not self._stop.is_set():
            self._stop.wait(AUTO_ADVANCE_LEVEL_PERIOD_SECONDS)

            # headset is worn if nonzero values exist in the first 20 raw channel readings
            headset_worn = False
            for value in self.raw_values[0:20]:
                if value > 0:
                    headset_worn = True
                    break

            seconds_since_state_publish = (
                datetime.datetime.now() -
                self.state_last_published).total_seconds()

            should_advance_headset_worn = headset_worn and seconds_since_state_publish > AUTO_ADVANCE_AFTER_SECONDS_WHILE_HEADSET_WORN
            should_advance_headset_off = not headset_worn and seconds_since_state_publish > AUTO_ADVANCE_AFTER_SECONDS_WHILE_HEADSET_OFF

            should_advance = should_advance_headset_worn or should_advance_headset_off

            if should_advance:
                if self.state >= 5:
                    self.levels_advance_upwards = False
                if self.state <= 1:
                    self.levels_advance_upwards = True

                next_state = self.state + 1 if self.levels_advance_upwards else self.state - 1
                self.state = next_state

                advancing_direction = "UPWARDS" if self.levels_advance_upwards else "DOWNWARDS"
                print("[ ] AUTOMATICALLY ADVANCING %s, EMITTING STATE: %s" %
                      (advancing_direction, self.state))
                rabbit.publish_state(self.state)
                self.state_last_published = datetime.datetime.now()
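A standalone trace of the bounce logic above: the state stays in the 1-5 range and the auto-advance direction flips at either end, independent of the RabbitMQ wiring.

state, levels_advance_upwards = 4, True
trace = []
for _ in range(8):
    if state >= 5:
        levels_advance_upwards = False
    if state <= 1:
        levels_advance_upwards = True
    state = state + 1 if levels_advance_upwards else state - 1
    trace.append(state)
print(trace)   # [5, 4, 3, 2, 1, 2, 3, 4]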
Example #4
 def update_rawvalues(self):
     rabbit = RabbitController('localhost', 5672, 'guest', 'guest', '/')
     while not self._stop.is_set():
         self._stop.wait(EMIT_EEGDATA_PERIOD_SECONDS)
         # set the current state in raw_values
         self.raw_values[21] = int(self.state)
         # send to the bus
         print("[ ] EMITTING EEGDATA: %s" % (self.raw_values))
         rabbit.publish_eegdata(self.raw_values)
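From the subscriber in Examples #1 and #9 (which reads command['Values']), publish_eegdata presumably wraps the 22-slot list in a JSON object with a 'Values' field. A sketch of that assumed payload: slots 0-19 carry channel readings and slot 21 carries the state, while slot 20 is not explained by these examples.

import json

raw_values = [0] * 22        # slots 0-19: raw channel readings, slot 21: current state
raw_values[21] = 3           # example state
payload = json.dumps({'Values': raw_values})   # assumed message shape consumed by rabbitcallback
print(payload)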
Example #5
    def __init__(self, host, port, user, password, virtualhost):
        super(EEGFromRabbitMQ, self).__init__()
        print('[ ] EEGFromRabbitMQ Started')
        # connect to the message bus with the supplied credentials
        self.rabbit = RabbitController(host, port, user, password, virtualhost)
        self.latest_data = None
        self.listening_task = threading.Thread(name="eeg_data", 
                                               target=self.listen)
        self.listening_task.start()
Example #6
 def __init__(self, adapter, connection_address, polling_interval):
     self.rabbit = RabbitController('localhost', 5672, 'guest', 'guest',
                                    '/')
     self.polling_interval = polling_interval
     self.adapter = adapter
     self.connection_address = connection_address
     self.adapter.start()
     self.device = adapter.connect(connection_address)
     self.device.subscribe(RECEIVE_CHARACTERISTIC,
                           callback=self._receive_data_callback)
     self._stop = Event()
     self.heart_rate_value = 0
     signal.signal(signal.SIGINT, self._signal_handler)
Example #7
 def listen_for_keys(self):
     rabbit = RabbitController('localhost', 5672, 'guest', 'guest', '/')
     while not self._stop.is_set():
         self._stop.wait(LISTEN_FOR_KEY_SECONDS)
         if msvcrt.kbhit():
             ch = msvcrt.getch()
             if ch:
                 key = ch.decode()
                 if key in ['1', '2', '3', '4', '5']:
                     self.state = int(key)
                     print("[ ] PRESSED KEY '%s', EMITTING STATE: %s" %
                           (key, self.state))
                     rabbit.publish_state(self.state)
                     self.state_last_published = datetime.datetime.now()
Example #8
    def __init__(self,
                 audio_folder,
                 up_transition_sound_filename,
                 down_transition_sound_filename,
                 sample_rate=16000,
                 bus_host="localhost",
                 bus_port=5672,
                 bus_username='******',
                 bus_password='******',
                 bus_virtualhost='/'):
        """
		:param audio_folder: audio folder with sub folders, each numbers after a meditation state (e.g, 0, 1, 2, 3, ..).
							 Each folder represents a meditation stage and should contain any tracks
							 for that stage
		:param up_transition_sound_filename
		:param down_transition_sound_filename
		:param sample_rate: the sample rate for the sounds, defaults to 16 Khz
		"""
        self.SAMPLE_RATE = sample_rate
        self.audio_folder = audio_folder
        self.up_transition_sound_filename = up_transition_sound_filename
        self.down_transition_sound_filename = down_transition_sound_filename
        self.playing_tracks = []
        self.playing_heartbeats = []
        self.playing_transitions = []
        self.tracks_by_stage = defaultdict(list)
        self.performing_a_mix = False
        self.mixing_thread = None
        self.bus = RabbitController(bus_host, bus_port, bus_username,
                                    bus_password, bus_virtualhost)
        self.current_stage = None

        swmixer.init(samplerate=self.SAMPLE_RATE, chunksize=1024, stereo=True)
        swmixer.start()

        self.current_playing_track_filename = None
        self.tracks_last_playing_position = dict()

        self._validate_audio_files_and_prep_data()

        sound_controller_channel = self.bus.open_channel()
        self.bus.subscribe_meditation(
            self.process_meditation_state_command,
            existing_channel=sound_controller_channel)
        self.bus.subscribe_heart_rate(
            self.process_heart_rate_command,
            existing_channel=sound_controller_channel)
        logging.info("waiting for meditation state and heart rates messages..")
        sound_controller_channel.start_consuming()
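Per the docstring above (and the validation in Example #15, which additionally requires a file whose name contains "heartbeat" in every stage folder), audio_folder must hold one numbered subfolder per stage with at least one track. The sketch below creates such a layout with empty placeholder files; it satisfies the existence checks only, real audio is needed for playback, and all names are illustrative.

import os

audio_folder = 'audio'   # assumed path
for stage in range(1, 6):
    stage_dir = os.path.join(audio_folder, str(stage))
    os.makedirs(stage_dir, exist_ok=True)
    for name in ('heartbeat.wav', 'soundscape_a.wav'):   # placeholder file names
        open(os.path.join(stage_dir, name), 'a').close()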
Example #9
class EEGFromRabbitMQ(EEGSource):
    def __init__(self, host, port, user, password, virtualhost):
        super(EEGFromRabbitMQ, self).__init__()
        print('[ ] EEGFromRabbitMQ Started')
        # connect to the message bus with the supplied credentials
        self.rabbit = RabbitController(host, port, user, password, virtualhost)
        self.latest_data = None
        self.meditation_state_handler = None
        self.eeg_listening_task = threading.Thread(target=self.listen_eeg)
        self.eeg_listening_task.daemon = True
        self.eeg_listening_task.start()

        self.meditation_listening_task = threading.Thread(
            target=self.listen_meditation)
        self.meditation_listening_task.daemon = True
        self.meditation_listening_task.start()

    def listen_eeg(self):
        self.rabbit.subscribe_eegdata(self.rabbitcallback)

    def listen_meditation(self):
        self.rabbit.subscribe_meditation(self.meditationcallback)

    def rabbitcallback(self, ch, method, properties, body):
        if body is None or body == '':
            return
        command = json.loads(body)
        self.latest_data = EEGData(command['Values'])
        # print("received latest EEG Data: %s" %(repr(command['Values'])))

    # return the most recently received sample
    def read_new_data(self):
        return self.latest_data

    def set_meditation_state_handler(self, handler):
        self.meditation_state_handler = handler

    def meditationcallback(self, ch, method, properties, body):
        print(("received meditation command with body \"{body}\"").format(
            body=body))

        command = json.loads(body)
        meditation_state = command['State']

        if self.meditation_state_handler is not None:
            self.meditation_state_handler(meditation_state)
Example #10
 def __init__(self, *args, **kwargs):
     super().__init__(*args, **kwargs)
     signal.signal(signal.SIGINT, self._signal_handler)
     self.rabbit = RabbitController('localhost', 5672, 'guest', 'guest',
                                    '/')
     self.queue = deque(
         maxlen=QUEUE_SIZE
     )  # we only use append, therefore no need in queue.Queue
     self.blink_events = 0  # counter of blink events
     self.state = None
     self.state_last_published = None
     self.levels_advance_upwards = True  # auto advance is moving upwards
     self.raw_values = [0] * 22
     self.lock = Lock()
     self._stop = Event()
     self._timer_thread = None
     self.start_emitting_messages()
Example #11
    def __init__(self, eeg_source, gui):
        print("[>] _INIT")
        self.eeg_source = eeg_source
        self.frame_index = 0
        self.speed = 0.4
        self.channels = 24
        self.sinelength = 300  # frames
        self.gui = gui
        self.maxfps = 60
        self.states_flames = []
        self.user_connected = False

        # init rabbitMQ connection
        self.rabbit = RabbitController('localhost', 5672, 'guest', 'guest',
                                       '/')

        self.input_controller = InputController(self)

        # attach keyboard events.
        self.input_controller.bind_keyboardevents(self.gui)

        # references to globals or values defined above
        self.retreive_params()
        self.flame = self.states_flames[0]
        # set these 3 to start fractal interpolation
        self.transition_pct = None
        self.transition_from = None
        self.transition_to = None
        # sets the frame at which the user disconnected
        self.disconnected_at = None

        # Init and transition so animation begins right away
        self.set_meditation_state(1, False)
        self.set_meditation_state(set_next=True)

        # Listen to meditation state events
        self.eeg_source.set_meditation_state_handler(self.set_meditation_state)
Example #12
class ThreadingOscUDPServer(socketserver.ThreadingMixIn, OscUDPServer):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        signal.signal(signal.SIGINT, self._signal_handler)
        self.rabbit = RabbitController('localhost', 5672, 'guest', 'guest',
                                       '/')
        self.queue = deque(
            maxlen=QUEUE_SIZE
        )  # we only use append, therefore no need in queue.Queue
        self.blink_events = 0  # counter of blink events
        self.state = None
        self.state_last_published = None
        self.levels_advance_upwards = True  # auto advance is moving upwards
        self.raw_values = [0] * 22
        self.lock = Lock()
        self._stop = Event()
        self._timer_thread = None
        self.start_emitting_messages()

    def increment_blink(self):
        with self.lock:
            self.blink_events += 1

    def _signal_handler(self, _, unused_frame):
        self._stop.set()

    def start_emitting_messages(self):
        self.state = 1
        self.rabbit.publish_state(self.state)
        self.state_last_published = datetime.datetime.now()
        Thread(target=self.predict_next_level, daemon=True).start()
        Thread(target=self.update_rawvalues, daemon=True).start()
        Thread(target=self.listen_for_keys, daemon=True).start()
        Thread(target=self.auto_advance_level, daemon=True).start()

    def predict_next_level(self):
        while not self._stop.is_set():
            self._stop.wait(EMIT_STAGE_PERIOD_SECONDS)
            try:
                data = np.array(self.queue, dtype=np.float64)
                model = arima_model.ARIMA(data, order=ARIMA_PARAMS)
                model = model.fit(disp=0)
                forecast = model.predict(start=1, end=20)
            except ValueError:
                print(
                    "Skipping state evaluation due to insufficient amount of data collected."
                )
                return
            data_filtered = data[np.where(
                np.logical_and(np.greater_equal(data, np.percentile(data, 5)),
                               np.less_equal(data, np.percentile(data, 95))))]
            mean_diff = np.mean(forecast) - np.mean(data_filtered)
            # add more logic here, considering movement and blinks
            if mean_diff > UPPER_THRESHOLD:
                self.state = min(self.state + 1, 5)
                self.levels_advance_upwards = True  # auto advance up after upward transition
            elif mean_diff < LOWER_THRESHOLD:
                self.state = max(self.state - 1, 1)
                self.levels_advance_upwards = False  # auto advance down after downward transition

            # send to the bus
            print("[ ] EMITTING STATE: %s" % (self.state))
            self.rabbit.publish_state(self.state)
            self.state_last_published = datetime.datetime.now()

            with self.lock:
                self.blink_events = 0

    def update_rawvalues(self):
        while not self._stop.is_set():
            self._stop.wait(EMIT_EEGDATA_PERIOD_SECONDS)
            # set the current state in raw_values
            self.raw_values[21] = int(self.state)
            # send to the bus
            print("[ ] EMITTING EEGDATA: %s" % (self.raw_values))
            self.rabbit.publish_eegdata(self.raw_values)

    def listen_for_keys(self):
        while not self._stop.is_set():
            self._stop.wait(LISTEN_FOR_KEY_SECONDS)
            if msvcrt.kbhit():
                ch = msvcrt.getch()
                if ch:
                    key = ch.decode()
                    if key in ['1', '2', '3', '4', '5']:
                        self.state = int(key)
                        print("[ ] PRESSED KEY '%s', EMITTING STATE: %s" %
                              (key, self.state))
                        self.rabbit.publish_state(self.state)
                        self.state_last_published = datetime.datetime.now()

    def auto_advance_level(self):
        while not self._stop.is_set():
            self._stop.wait(AUTO_ADVANCE_LEVEL_PERIOD_SECONDS)

            # headset is worn if nonzero values exist in the first 20 raw channel readings
            headset_worn = False
            for value in self.raw_values[0:20]:
                if value > 0:
                    headset_worn = True
                    break

            seconds_since_state_publish = (
                datetime.datetime.now() -
                self.state_last_published).total_seconds()

            should_advance_headset_worn = headset_worn and seconds_since_state_publish > AUTO_ADVANCE_AFTER_SECONDS_WHILE_HEADSET_WORN
            should_advance_headset_off = not headset_worn and seconds_since_state_publish > AUTO_ADVANCE_AFTER_SECONDS_WHILE_HEADSET_OFF

            should_advance = should_advance_headset_worn or should_advance_headset_off

            if should_advance:
                if self.state >= 5:
                    self.levels_advance_upwards = False
                if self.state <= 1:
                    self.levels_advance_upwards = True

                next_state = self.state + 1 if self.levels_advance_upwards else self.state - 1
                self.state = next_state

                advancing_direction = "UPWARDS" if self.levels_advance_upwards else "DOWNWARDS"
                print("[ ] AUTOMATICALLY ADVANCING %s, EMITTING STATE: %s" %
                      (advancing_direction, self.state))
                self.rabbit.publish_state(self.state)
                self.state_last_published = datetime.datetime.now()
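All of these examples lean on a shared RabbitController (common.rabbit_controller in Example #13) whose implementation is not shown here. The sketch below is a guess at its publish side using pika, inferred from the publish_state / publish_eegdata / publish_heart calls above; the queue names and the 'HeartRate' field are assumptions, while 'State' and 'Values' match the subscriber callbacks in Examples #9 and #1.

import json
import pika


class RabbitControllerSketch(object):
    """ Hypothetical sketch of the publish side of RabbitController. """

    def __init__(self, host, port, user, password, virtualhost):
        credentials = pika.PlainCredentials(user, password)
        params = pika.ConnectionParameters(host, port, virtualhost, credentials)
        self.connection = pika.BlockingConnection(params)
        self.channel = self.connection.channel()
        for queue in ('meditation_state', 'eeg_data', 'heart_rate'):   # assumed queue names
            self.channel.queue_declare(queue=queue)

    def publish_state(self, state):
        self.channel.basic_publish(exchange='', routing_key='meditation_state',
                                   body=json.dumps({'State': state}))

    def publish_eegdata(self, values):
        self.channel.basic_publish(exchange='', routing_key='eeg_data',
                                   body=json.dumps({'Values': values}))

    def publish_heart(self, heart_rate):
        self.channel.basic_publish(exchange='', routing_key='heart_rate',
                                   body=json.dumps({'HeartRate': heart_rate}))   # field name assumed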
Example #13
import datetime

from flask import Flask, redirect, request, render_template, send_from_directory, jsonify
from common.rabbit_controller import RabbitController
from api import API
from bus import Bus
from context import Context

rabbit = RabbitController('localhost', 5672, 'guest', 'guest', '/')
bus = Bus(rabbit)
api = API(bus)
context = Context(bus)

app = Flask(__name__)


@app.route('/', methods=['GET'])
def landing_page():
    ctx = context.fetch(request)

    if 'success' in request.args:
        ctx['alert_success'] = request.args['success']
    if 'error' in request.args:
        ctx['alert_error'] = request.args['error']

    return render_template('landing_page.html', **ctx)


@app.route('/static/<path:path>', methods=['GET'])
def send_static(path):
    """ Static serving for static directory """
    return send_from_directory('static', path)
class HeartrateSensorService(object):
    def __init__(self, adapter, connection_address, polling_interval):
        self.rabbit = RabbitController('localhost', 5672, 'guest', 'guest',
                                       '/')
        self.polling_interval = polling_interval
        self.adapter = adapter
        self.connection_address = connection_address
        self.adapter.start()
        self.device = adapter.connect(connection_address)
        self.device.subscribe(RECEIVE_CHARACTERISTIC,
                              callback=self._receive_data_callback)
        self._stop = Event()
        self.heart_rate_value = 0
        signal.signal(signal.SIGINT, self._signal_handler)

    def restart(self):
        self._stop.set()
        try:
            self.device.disconnect()
            self.adapter.clear_bond()
        except pygatt.exceptions.NotConnectedError:
            pass
        self.adapter.start()
        self.device = self.adapter.connect(self.connection_address)
        self.device.subscribe(RECEIVE_CHARACTERISTIC,
                              callback=self._receive_data_callback)
        self._stop.clear()
        self.publish_heart_rate_data()

    def publish_heart_rate_data(self):
        while not self._stop.is_set():
            try:
                if self.heart_rate_value:
                    print("publishing HR " + str(self.heart_rate_value))
                    self.rabbit.publish_heart(self.heart_rate_value)
                time.sleep(self.polling_interval)
            except Exception:
                logging.exception(
                    "Exception while sending data to the message queue:")

    def _signal_handler(self, *args):
        self._stop.set()
        self.device.disconnect()
        self.adapter.stop()

    def _receive_data_callback(self, _, raw_data):
        try:
            packet_dict = {}
            # byte_1_data_container = PARSING_SCHEMA[0].parse(to_bytes(raw_data[0], 1, 'little'))
            # byte_3_data_container = PARSING_SCHEMA[2].parse(to_bytes(raw_data[2], 1, 'little'))
            # packet_dict['signal_strength'] = byte_1_data_container['signal_strength']
            # packet_dict['has_signal'] = byte_1_data_container['has_signal']
            # packet_dict['bargraph'] = byte_3_data_container['bargraph']
            # packet_dict['no_finger'] = byte_3_data_container['no_finger']
            # packet_dict['spo2'] = raw_data[4]
            pleth = int(raw_data[1])
            pulse_rate = int(raw_data[3] | ((raw_data[2] & 0x40) << 1))
            ts = str(datetime.datetime.now())
            # hr_object = HeartRateData(**packet_dict)
            if pleth >= 100 or pulse_rate >= 110:
                # most likely corrupted data
                return
            self.heart_rate_value = pulse_rate
        except Exception as e:
            logging.debug('Error on parsing raw heart rate data:')
            logging.exception(e)
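The pulse-rate decoding in _receive_data_callback takes the low bits from raw_data[3] and pulls the high bit out of bit 6 of raw_data[2]. A small worked check of that expression with made-up packet bytes:

raw_data = [0x80, 0x3c, 0x00, 0x48, 0x62]   # hypothetical packet bytes

pleth = int(raw_data[1])                                       # 0x3c -> 60
pulse_rate = int(raw_data[3] | ((raw_data[2] & 0x40) << 1))    # 0x48 -> 72 bpm
assert (pleth, pulse_rate) == (60, 72)

# had bit 6 of raw_data[2] been set it would contribute 128 to the rate,
# e.g. raw_data[2] = 0x40, raw_data[3] = 0x05 -> 0x80 | 0x05 = 133 bpm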
Example #15
class MindMurmurSoundScapeController(object):
    """ Non-blocking Audio controller to mix tracks and play sound scapes and heartbeats
	according to input meditation stage

	NOTE: Currently supporting only tracks with sample rate of 16,000 hz
	"""
    SAMPLE_RATE = 16000

    FADEIN_MAX_AFTER_SIX_SECONDS_RATE = SAMPLE_RATE * 12
    FADEOUT_MAX_AFTER_SIX_SECONDS_RATE = SAMPLE_RATE * 12

    TRACKS_KEY = "tracks"
    HEARTBEAT_KEY = "heartbeat"
    HEARTBEAT_SOUND_FILENAME = "heartbeat"
    STAGE_NAME_SPLITTER = "::"

    def __init__(self,
                 audio_folder,
                 up_transition_sound_filename,
                 down_transition_sound_filename,
                 sample_rate=16000,
                 bus_host="localhost",
                 bus_port=5672,
                 bus_username='******',
                 bus_password='******',
                 bus_virtualhost='/'):
        """
		:param audio_folder: audio folder with sub folders, each numbers after a meditation state (e.g, 0, 1, 2, 3, ..).
							 Each folder represents a meditation stage and should contain any tracks
							 for that stage
		:param up_transition_sound_filename
		:param down_transition_sound_filename
		:param sample_rate: the sample rate for the sounds, defaults to 16 Khz
		"""
        self.SAMPLE_RATE = sample_rate
        self.audio_folder = audio_folder
        self.up_transition_sound_filename = up_transition_sound_filename
        self.down_transition_sound_filename = down_transition_sound_filename
        self.playing_tracks = []
        self.playing_heartbeats = []
        self.playing_transitions = []
        self.tracks_by_stage = defaultdict(list)
        self.performing_a_mix = False
        self.mixing_thread = None
        self.bus = RabbitController(bus_host, bus_port, bus_username,
                                    bus_password, bus_virtualhost)
        self.current_stage = None

        swmixer.init(samplerate=self.SAMPLE_RATE, chunksize=1024, stereo=True)
        swmixer.start()

        self.current_playing_track_filename = None
        self.tracks_last_playing_position = dict()

        self._validate_audio_files_and_prep_data()

        sound_controller_channel = self.bus.open_channel()
        self.bus.subscribe_meditation(
            self.process_meditation_state_command,
            existing_channel=sound_controller_channel)
        self.bus.subscribe_heart_rate(
            self.process_heart_rate_command,
            existing_channel=sound_controller_channel)
        logging.info("waiting for meditation state and heart rates messages..")
        sound_controller_channel.start_consuming()

    def process_meditation_state_command(self, channel, method, properties,
                                         body):
        logging.info(
            ("received meditation command with body \"{body}\"").format(
                body=body))

        command = MeditationStateCommand.from_string(body)

        desired_stage = command.get_state()
        logging.info(
            "parsing request to transition to stage \"{desired_stage}\"".
            format(desired_stage=desired_stage))

        if desired_stage == self.current_stage:
            logging.info("requested stage is already playing, ignoring")
        else:
            stage_track = self._get_meditation_stage_soundscape_track_for_stage(
                desired_stage)
            stage_change_direction = desired_stage - (self.current_stage or 1)

            transition_track = (self.up_transition_sound_filename
                                if stage_change_direction > 0 else
                                self.down_transition_sound_filename)

            self._mix_track(stage_track, transition_track)
            self.current_stage = desired_stage

    def process_heart_rate_command(self, channel, method, properties, body):
        logging.info(
            ("received heart rate command with body \"{body}\"").format(
                body=body))

        command = HeartRateCommand.from_string(body)

        logging.info(
            "parsing request to play heartbeat for current stage ({current_stage})"
            .format(current_stage=self.current_stage))
        self.play_heartbeat_for_stage()

    def stop_all_sounds(self):
        for track in self.playing_tracks + self.playing_heartbeats + self.playing_transitions:
            if not track.done:
                track.stop()

    def _validate_audio_files_and_prep_data(self):
        """ Validates "audio_folder" structure to be of subfolders named after mediation stage and have at least one
		track in them

		"""
        if not os.path.isfile(self.up_transition_sound_filename):
            raise ValueError(
                "{up_transition_sound_filename} is not a file".format(
                    up_transition_sound_filename=self.
                    up_transition_sound_filename))

        if not os.path.isfile(self.down_transition_sound_filename):
            raise ValueError(
                "{down_transition_sound_filename} is not a file".format(
                    down_transition_sound_filename=self.
                    down_transition_sound_filename))

        if not os.path.isdir(self.audio_folder):
            raise ValueError(
                "{folder} folder does not exists on system".format(
                    folder=self.audio_folder))

        for dir_name in os.listdir(self.audio_folder):
            full_dir_path = os.path.join(self.audio_folder, dir_name)

            if not os.path.isdir(full_dir_path):
                raise ValueError(
                    "{dir_name} is not a folder".format(dir_name=dir_name))

            try:
                stage_number = int(dir_name)
            except ValueError:
                raise ValueError(
                    "{dir_name} is not a legal name".format(dir_name=dir_name))

            tracks = []
            heartbeat_track = None

            for filename in os.listdir(full_dir_path):
                full_filename = os.path.join(full_dir_path, filename)

                if not os.path.isfile(full_filename):
                    raise ValueError(
                        "{filename} is not a file".format(filename=filename))
                elif self.HEARTBEAT_SOUND_FILENAME in filename:
                    heartbeat_track = full_filename
                else:
                    tracks.append(full_filename)

            if len(tracks) == 0:
                raise ValueError(
                    ("{dir_name} folder doesn't contain tracks").format(
                        dir_name=dir_name))
            elif heartbeat_track is None:
                raise ValueError(
                    "No heartbeat track located for phase {!s}".format(
                        stage_number))
            else:
                logging.info("Got data for stage \"{stage_number}\"".format(
                    stage_number=stage_number))
                self.tracks_by_stage[stage_number] = {
                    self.TRACKS_KEY: tracks,
                    self.HEARTBEAT_KEY: heartbeat_track
                }

        logging.info("using audio folder: {audio_folder}".format(
            audio_folder=self.audio_folder))

    def _mix_track(self,
                   track_filename,
                   stage_change_indication_filename,
                   play_from_second=None):
        """ Mix a track with path "track filename" to play. If another track is playing, fade it out, and fade this in.

		Overall, this takes 15 seconds:
		0:00 - 0:06: playing track fades out
		0:04 - 0:13: transition sounds plays
		0:09 - 0:15: next track mixes in

		:param track_filename: track filename to fade in
		:param stage_change_direction: int, used for transition change indication sound
		:param play_from_second: the time in the track to jump to (just for testing purposes, shouldn't be used otherwise)
		"""
        logging.info("starting mixing")
        start_time_seconds = time.time()

        if len(self.playing_tracks) > 0:
            logging.info(
                "fading out current track to end in {seconds} seconds".format(
                    seconds=self.FADEOUT_MAX_AFTER_SIX_SECONDS_RATE /
                    self.SAMPLE_RATE / 2.0))
            track_current_position = (self.playing_tracks[-1].get_position() +
                                      self.FADEOUT_MAX_AFTER_SIX_SECONDS_RATE)
            self.tracks_last_playing_position[
                self.current_playing_track_filename] = track_current_position

            self.playing_tracks[-1].set_volume(
                0, fadetime=self.FADEOUT_MAX_AFTER_SIX_SECONDS_RATE)

            time.sleep(4 - (time.time() - start_time_seconds))

            # play transition indication
            logging.info("playing transition")
            transition_indication_sound = swmixer.Sound(
                stage_change_indication_filename)
            transition_channel = transition_indication_sound.play(volume=0.1)
            self.playing_transitions.append(transition_channel)

            time.sleep(9 - (time.time() - start_time_seconds))

            self.playing_tracks[-1].stop()

        # mix in sound scape for stage
        track_sound = swmixer.Sound(track_filename)
        offset = 0

        if play_from_second is not None:
            offset = self.SAMPLE_RATE * play_from_second

        self.current_playing_track_filename = track_filename

        track_last_position = self.tracks_last_playing_position.get(
            track_filename, None)
        track_channel = track_sound.play(
            fadein=self.FADEIN_MAX_AFTER_SIX_SECONDS_RATE,
            offset=track_last_position or offset,
            loops=100)
        self.playing_tracks.append(track_channel)

        logging.info(
            "starting playing \"{track}\" from {seconds} seconds in".format(
                track=track_filename.split("/")[-1],
                seconds=(track_last_position or offset) / self.SAMPLE_RATE /
                2.0))

        self.performing_a_mix = False

    def _play_heartbeat(self, heartbeat_track):
        """ Play "heartbeat_track"

		:param heartbeat_track: the filename on system to play
		"""
        heartbeat_sound = swmixer.Sound(heartbeat_track)
        heartbeat_channel = heartbeat_sound.play(volume=1.1)
        self.playing_heartbeats.append(heartbeat_channel)
        logging.info("starting playing heartbeat \"{heartbeat_track}\"".format(
            heartbeat_track=heartbeat_track.split("/")[-1]))

    def _get_meditation_stage_soundscape_track_for_stage(self, stage):
        return self.tracks_by_stage[stage][self.TRACKS_KEY][0]

    def _get_meditation_stage_heartbeat_track_for_stage(self, stage):
        return self.tracks_by_stage[stage][self.HEARTBEAT_KEY]

    def set_meditation_stage(self,
                             from_stage,
                             to_stage,
                             play_from_second=None):
        """

		:param from_stage: int, the stage we're in
		:param to_stage: int, the stage we're going to
		:param play_from_second: the time in the track to jump to (just for testing purposes, shouldn't be used otherwise)
		:return:
		"""
        stage_change_direction = to_stage - from_stage

        if stage_change_direction == 0:
            logging.info("stage is the same as requested, ignoring")
        else:
            if not self.performing_a_mix:
                try:
                    self.performing_a_mix = True

                    stage_track = self._get_meditation_stage_soundscape_track_for_stage(
                        to_stage)
                    transition_track = (self.up_transition_sound_filename
                                        if stage_change_direction > 0 else
                                        self.down_transition_sound_filename)

                    self.mixing_thread = Thread(
                        target=self._mix_track,
                        args=[stage_track, transition_track, play_from_second])
                    self.mixing_thread.start()
                except Exception:
                    self.performing_a_mix = False
            else:
                logging.info("already in stage transition, ignoring")

    def play_heartbeat_for_stage(self):
        stage_heartbeat = self._get_meditation_stage_heartbeat_track_for_stage(
            self.current_stage or 1)
        self._play_heartbeat(stage_heartbeat)