def getFeatures(self, eegSegment, timeStampSegment, time_step, local_mu,
                    local_sigma):
        """Extract spectral features from a single EEG segment.

        Computes the FFT power spectrum of eegSegment and summarizes it as
        band powers (delta, theta, alpha), band-power ratios, and the
        coefficient of variation local_sigma / |local_mu|.
        """

        targetBand = band(1, 12)
        deltaBand = band(2.5, 3.5)
        thetaBand = band(7.0, 8.0)
        wideThetaBand = band(6.5, 8.0)
        alphaBand = band(9.0, 9.5)
        smallDelta = 0.000000001

        #---------------
        # compute power spectrum and sort it
        powerSpect = np.abs(np.fft.fft(eegSegment))**2
        freqs = np.fft.fftfreq(len(powerSpect), d=time_step)
        idx = np.argsort(freqs)
        sortedFreqs = freqs[idx]
        sortedPowerSpect = powerSpect[idx]
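        # Note: np.fft.fftfreq(len(powerSpect), d=time_step) returns the bin
        # frequencies in cycles per unit of time_step (Hz if time_step is in
        # seconds), negative frequencies included; the argsort above rearranges
        # both arrays so the frequency axis increases monotonically before the
        # band powers are summed.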

        # coefficient of variation (local_sigma / |local_mu|); smallDelta
        # guards against division by zero when the local mean is near zero
        cv = local_sigma / (np.abs(local_mu) + smallDelta)
        integral = targetBand.getSumPower(sortedFreqs, sortedPowerSpect)
        deltaPower = deltaBand.getSumPower(sortedFreqs, sortedPowerSpect)
        thetaPower = thetaBand.getSumPower(sortedFreqs, sortedPowerSpect)
        alphaPower = alphaBand.getSumPower(sortedFreqs, sortedPowerSpect)
        wideThetaPower = wideThetaBand.getSumPower(sortedFreqs,
                                                   sortedPowerSpect)
        deltaRatio = deltaPower / (deltaPower + thetaPower + alphaPower +
                                   smallDelta)
        thetaRatio = thetaPower / (deltaPower + smallDelta)
        # Feature vector; note that deltaRatio, integral, and thetaPower each
        # appear more than once.
        return np.array([
            cv, integral, deltaRatio, deltaRatio, thetaRatio, deltaPower,
            integral, alphaPower, thetaPower, thetaPower, integral
        ])
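
The band helper used above is not shown in this listing. As a rough sketch of
the assumed behavior (the real class may differ), band(bottom, top) stores a
frequency range in Hz and getSumPower sums the spectrum bins whose frequencies
fall inside that range:

import numpy as np

class band:
    """Minimal stand-in for the band helper assumed by getFeatures above."""

    def __init__(self, bottom, top):
        self.bottom = bottom  # lower edge of the band in Hz
        self.top = top        # upper edge of the band in Hz

    def getSumPower(self, freqs, powerSpect):
        # Sum the power of all bins whose frequency lies inside the band.
        mask = (freqs >= self.bottom) & (freqs <= self.top)
        return np.sum(powerSpect[mask])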
Example 2
    def __init__(self, paramDir='', paramFileName='', outputDir=''):

        self.paramSetupType = 'directories'
        with open('path.json') as pathFile:
            p = json.load(pathFile)
        self.pathPrefix = p['pathPrefix']
        # print('self.pathPrefix = ', self.pathPrefix)
        # directory and file name
        if paramDir == '':
            if paramFileName == '':
                paramFilePath = self.pathPrefix + '/' + p[
                    'paramsDir'] + '/params.json'
            else:
                paramFilePath = self.pathPrefix + '/' + p[
                    'paramsDir'] + '/' + paramFileName
        else:
            paramFilePath = paramDir + '/' + paramFileName
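        # For example (hypothetical values), if path.json contains
        #   {"pathPrefix": "/home/user/proj", "paramsDir": "params", ...}
        # then the resolution above gives:
        #   ParameterSetup()                                   -> /home/user/proj/params/params.json
        #   ParameterSetup(paramFileName='my.json')            -> /home/user/proj/params/my.json
        #   ParameterSetup(paramDir='/tmp', paramFileName='my.json') -> /tmp/my.json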

        print('in ParameterSetup, paramFilePath =', paramFilePath)
        self.parameterFileHandler = open(paramFilePath)
        d = json.load(self.parameterFileHandler)
        if outputDir == '':
            self.classifierDir = self.pathPrefix + '/' + d['classifierDir']
            self.deepParamsDir = self.pathPrefix + '/' + d['deepParamsDir']
            self.predDir = self.pathPrefix + '/' + d['predDir']
            self.modelDirRoot = d['modelDirRoot']
        else:
            self.classifierDir = outputDir
            self.deepParamsDir = outputDir
            self.predDir = outputDir
            self.modelDirRoot = outputDir

        self.dataDir = self.pathPrefix + '/' + d['dataDir']
        self.pickledDir = self.pathPrefix + '/' + d['pickledDir']
        self.eegDir = self.pathPrefix + '/' + d['eegDir']
        self.featureDir = self.pathPrefix + '/' + d['featureDir']
        self.batchEvalDir = self.pathPrefix + '/' + d['batchEvalDir']
        self.standardMiceDir = self.pathPrefix + '/' + d['standardMiceDir']
        self.ksDir = self.pathPrefix + '/' + d['ksDir']
        self.finalClassifierDir = self.pathPrefix + '/' + d[
            'finalclassifierDir']
        self.waveOutputDir = self.pathPrefix + '/' + d['wavesDir']
        self.logDir = self.pathPrefix + '/' + d['logDir']
        self.postDir = self.pathPrefix + '/' + d['postDir']

        self.classifierPrefix = d['classifierPrefix']
        self.label4withEMG = d['label4withEMG']
        self.label4withoutEMG = d['label4withoutEMG']

        # for signal processing
        self.windowSizeInSec = d[
            'windowSizeInSec']  # size of window in time for estimating the state
        self.samplingFreq = d['samplingFreq']  # sampling frequency of data

        if 'graphUpdateFreqInHz' in d:
            self.graphUpdateFreqInHz = d['graphUpdateFreqInHz']
        else:
            self.graphUpdateFreqInHz = 1

        if 'terminalConfigDefaultValue' in d:
            self.terminal_config_default_value = d[
                'terminalConfigDefaultValue']
        else:
            self.terminal_config_default_value = 'RSE'

        if 'channelIDs' in d:
            self.channelIDs = d['channelIDs']
        else:
            self.channelIDs = [1, 0]

        self.writeWholeWaves = d[
            'writeWholeWaves']  # whether to write out the whole wave data
        self.computeKS = d['computeKS']

        # for using history
        self.preContextSize = d[
            'preContextSize']  # number of time windows in EEG to look back in time
        self.postContextSize = d[
            'postContextSize']  # number of time windows in EEG to look ahead in time
        self.pastStageLookUpNum = d[
            'pastStageLookUpNum']  # number of stage labels to look back in time

        # for wavelets
        self.waveletWidths = d['waveletWidths']

        # for using EMG
        self.useEMG = d['useEMG']
        self.emgTimeFrameNum = d['emgTimeFrameNum']

        # for making a histogram
        self.wholeBand = band(d['bandMin'], d['bandMax'])
        self.binWidth4freqHisto = d[
            'binWidth4freqHisto']  # bin width in the frequency domain for visualizing spectrum as a histogram

        # file prefix
        self.eegFilePrefix = d['eegFilePrefix']
        self.trainDataFilePrefix = d['trainDataFilePrefix']
        self.featureFilePrefix = d['featureFilePrefix']
        self.classifierFilePrefix = d['classifierFilePrefix']

        # feature extractor
        self.extractorType = d['extractorType']
        self.lightPeriodStartTime = d['lightPeriodStartTime']

        # classifier
        self.classifierType = d['classifierType']
        self.networkType = d['networkType']
        self.classifierParams = d['classifierParams']
        if self.useEMG:
            label4EMG = self.label4withEMG
        else:
            label4EMG = self.label4withoutEMG
        self.classifierName = self.classifierPrefix + '.' + label4EMG

        self.sampleClassLabels = d['sampleClassLabels']
        self.subsampleRatios = d['subsampleRatios']
        self.supersample = d['supersample']

        self.predict_by_batch = d['predict_by_batch']

        # self.replacesWWWRtoWWWW = d['replacesWWWRtoWWWW']
        self.numOfConsecutiveWsThatProhibitsR = d[
            'numOfConsecutiveWsThatProhibitsR']

        # stride size used for prediction
        self.timeWindowStrideInSec = d['timeWindowStrideInSec']
        # self.lookBackTimeWindowNum = d['lookBackTimeWindowNum']

        self.useRawData = d['useRawData']
        self.useFreqHisto = d['useFreqHisto']
        self.useTime = d['useTime']

        if 'useSTFT' in d:
            self.useSTFT = d['useSTFT']
        else:
            self.useSTFT = 0

        # parameters for the optimizer
        self.optimizerType = d['optimizerType']
        self.adam_learningRate = d['adam_learningRate']
        self.sgd_learningRate = d['sgd_learningRate']
        self.sgd_decay = float(d['sgd_decay'])
        self.sgd_momentum = d['sgd_momentum']

        # optimization parameters for deep learning
        self.deep_epochs = d['deep_epochs']
        self.deep_steps_per_epoch = d['deep_steps_per_epoch']
        self.deep_batch_size = d['deep_batch_size']

        # network structure for deep learning
        if 'torch_loss_function' in d:
            self.torch_loss_function = d['torch_loss_function']
        else:
            self.torch_loss_function = 'cross_entropy'
        self.torch_filter_nums = d['torch_filter_nums']
        self.torch_kernel_sizes = d['torch_kernel_sizes']
        self.torch_strides = d['torch_strides']
        self.torch_skip_by = d['torch_skip_by']
        self.torch_patience = d['torch_patience']
        if 'torch_lstm_length' in d:
            self.torch_lstm_length = d['torch_lstm_length']
        if 'torch_lstm_num_layers' in d:
            self.torch_lstm_num_layers = d['torch_lstm_num_layers']
        if 'torch_lstm_hidden_size' in d:
            self.torch_lstm_hidden_size = d['torch_lstm_hidden_size']
        if 'torch_lstm_inputDim' in d:
            self.torch_lstm_inputDim = d['torch_lstm_inputDim']
        if 'torch_lstm_bidirectional' in d:
            self.torch_lstm_bidirectional = d['torch_lstm_bidirectional']

        self.torch_resnet_layer_nums = d['torch_resnet_layer_nums']
        self.torch_resnet_conv_channels = d['torch_resnet_conv_channels']
        self.torch_resnet_output_channels_coeffs = d[
            'torch_resnet_output_channels_coeffs']
        self.torch_resnet_resblock_stride_nums = d[
            'torch_resnet_resblock_stride_nums']
        self.torch_resnet_avg_pool_size = d['torch_resnet_avg_pool_size']

        self.deep_FCN_node_nums_by_layers = d['deep_FCN_node_nums_by_layers']
        self.deep_CNN_filter_nums_by_layers = d[
            'deep_CNN_filter_nums_by_layers']
        self.deep_CNN_kernel_sizes_by_layers = d[
            'deep_CNN_kernel_sizes_by_layers']
        self.deep_CNN_kernel_stride_sizes_by_layers = d[
            'deep_CNN_kernel_stride_sizes_by_layers']
        self.deep_skipConnectionLayerNum = d['deep_skipConnectionLayerNum']

        # dropoutRate
        self.dropoutRate = d['dropoutRate']

        # feature downsampling
        self.downsample_outputDim = d['downsample_outputDim']

        # features used in rawDataWithFreqHistoWithTime
        self.additionalFeatureDim = d['additionalFeatureDim']

        # markov order
        self.markovOrderForTraining = d['markovOrderForTraining']
        self.markovOrderForPrediction = d['markovOrderForPrediction']

        # number of stages to consider
        self.maximumStageNum = d['maximumStageNum']

        # maximum number of samples to be used
        self.maxSampleNum = d['maxSampleNum']

        if 'showCh2' in d:
            self.showCh2 = d['showCh2']
        else:
            self.showCh2 = True

        # Replace R with W if the EMG (or some other motion indicator) is larger
        # by this factor than in the segment with the smallest indicator value
        # among all past segments.
        if 'useCh2ForReplace' in d:
            self.useCh2ForReplace = d['useCh2ForReplace']
        else:
            self.useCh2ForReplace = True

        self.ch2_thresh_default = d['ch2_thresh_default']
        if 'ch2IntensityFunc' in d:
            self.ch2IntensityFunc = d['ch2IntensityFunc']
        else:
            self.ch2IntensityFunc = 'max_mean'

        if 'stft_time_bin_in_seconds' in d:
            self.stft_time_bin_in_seconds = d['stft_time_bin_in_seconds']
        else:
            self.stft_time_bin_in_seconds = 1

        if 'outputDim_cnn_for_stft' in d:
            self.outputDim_cnn_for_stft = d['outputDim_cnn_for_stft']
        else:
            self.outputDim_cnn_for_stft = 8 * 3 * 2

        # label correction (dictionary)
        # self.classLabels = ['S', 'W', 'R']
        self.labelCorrectionDict = {
            'S': 'n',
            'W': 'w',
            'R': 'r',
            'RW': 'w',
            'M': 'm',
            'P': 'P',
            'F2': 'F2',
            '?': '?',
            '-': '-'
        }
        ### self.stageLabel2stageID = {'W': 0, 'S': 1, 'R': 2, 'M': 3, 'P': 4, 'RW': 5, 'F2' : 6}
        # self.stageLabels = ['W', 'S', 'R', 'M']
        # self.stageLabels4evaluation = ['W', 'S', 'R', 'M']
        self.capitalize_for_writing_prediction_to_file = {
            'n': '1',
            'w': 'W',
            'r': 'R',
            'RW': 'RW',
            'm': 'M',
            'p': 'P',
            'F2': 'F2',
            '?': '?'
        }
        self.capitalize_for_display = {
            'n': 'NREM',
            'w': 'Wake',
            'r': 'REM',
            'RW': 'RW',
            'm': 'M',
            'p': 'P',
            'F2': 'F2',
            '?': '?'
        }
        self.capitalize_for_graphs = {
            'n': 'S',
            'w': 'W',
            'r': 'R',
            'RW': 'RW',
            'm': 'M',
            'p': 'P',
            'F2': 'F2',
            '?': '?'
        }

        # for reading data files
        self.metaDataLineNumUpperBound4eeg = 100
        self.metaDataLineNumUpperBound4stage = 100
        self.cueWhereEEGDataStarts = 'Time'
        # self.cueWhereStageDataStarts = 'No.,Epoch'
        self.cueWhereStageDataStarts = ',,,%,%,uV^2,,uV^2'

        # ID for the classifier
        # self.classifierID = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))

        orig_stageLabels = ['S', 'W', 'R', 'RW', 'M', 'P', 'F2', '?', '-']
        self.stagesByDepth = ['r', 'n', 'w', '?']
        self.stageLabel2stageID = {
            stage: stageID
            for stage, stageID in zip(orig_stageLabels[:self.maximumStageNum],
                                      range(self.maximumStageNum))
        }
        self.correctedLabel2depthID = {
            stage: stageID
            for stage, stageID in zip(self.stagesByDepth,
                                      range(len(self.stagesByDepth)))
        }
        ### self.stageLabels4evaluation = [key for key in self.stageLabel2stageID.keys()]
        self.stageLabels4evaluation = orig_stageLabels[:self.maximumStageNum]

        self.ch2_mean_init = d['ch2_mean_init']
        self.ch2_variance_init = d['ch2_variance_init']
        self.ch2_oldTotalSampleNum_init = d['ch2_oldTotalSampleNum_init']

        # used in statistics.py to set the range of past signal used for computing mean and std
        if 'standardization_max_storage_window_num' in d:
            self.standardization_max_storage_window_num = d[
                'standardization_max_storage_window_num']
        else:
            self.standardization_max_storage_window_num = 5000

        if 'standardization_trigger_interval' in d:
            self.standardization_trigger_interval = d[
                'standardization_trigger_interval']
        else:
            self.standardization_trigger_interval = 500

        if 'standardization_early_trigger' in d:
            self.standardization_early_trigger = d[
                'standardization_early_trigger']
        else:
            self.standardization_early_trigger = [
                10, 20, 30, 40, 50, 100, 150, 200, 300, 400
            ]
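
To make the stage-ID mapping built at the end of the constructor concrete,
here is a small standalone sketch; maximumStageNum is hard-coded purely for
illustration, whereas in ParameterSetup it is read from params.json:

orig_stageLabels = ['S', 'W', 'R', 'RW', 'M', 'P', 'F2', '?', '-']
maximumStageNum = 4  # hypothetical value for the sketch

stageLabel2stageID = {
    stage: stageID
    for stage, stageID in zip(orig_stageLabels[:maximumStageNum],
                              range(maximumStageNum))
}
print(stageLabel2stageID)  # {'S': 0, 'W': 1, 'R': 2, 'RW': 3}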
Example 3
metaDataLineNum4stage = 100
cueWhereEEGDataStarts = "Time"
cueWhereStageDataStarts = "No.,Epoch"

# for signal processing
wsizeInSec = 10  # size of window in time for estimating the state
samplingFreq = 128  # sampling frequency of data

# for training / test data extraction
# if trainWindowNumOrig is 0, all data is used for training.
### trainWindowNumOrig = 1500
### trainWindowNumOrig = 500
trainWindowNumOrig = 0

# for feature extraction
deltaBand = band(1, 4)
thetaBand = band(6, 9)
targetBands = (deltaBand, thetaBand)
lookBackWindowNum = 6

# for drawing spectrum
wholeBand = band(0, 16)
binWidth4freqHisto = 1  # bin width in the frequency domain for visualizing spectrum as a histogram
voltageRange = (-300, 300)
powerRange = (0, 2 * 10**8)
binnedPowerRange = (0, 2 * 10**9)
stage2color = {
    'W': 'b',
    'R': 'r',
    'S': 'k',
    '2': 'm',
}

Example 4

metaDataLineNum4stage = 100
cueWhereEEGDataStarts = "Time"
cueWhereStageDataStarts = "No.,Epoch"

# for signal processing
windowSizeInSec = params.windowSizeInSec  # size of window in time for estimating the state
samplingFreq = params.samplingFreq  # sampling frequency of data

# for training / test data extraction
# if trainWindowNumOrig is 0, all data is used for training.
### trainWindowNumOrig = 1500
### trainWindowNumOrig = 500
trainWindowNumOrig = 0

# for feature extraction
deltaBand = band(1, 4)
thetaBand = band(6, 9)
targetBands = (deltaBand, thetaBand)
lookBackWindowNum = 6

# for drawing spectrum
wholeBand = params.wholeBand
binWidth4freqHisto = params.binWidth4freqHisto  # bin width in the frequency domain for visualizing spectrum as a histogram

voltageRange = (-300, 300)
powerRange = (0, 2 * 10**8)
binnedPowerRange = (0, 2 * 10**9)
stage2color = {
    'W': 'b',
    'R': 'r',
    'S': 'k',