示例#1
0
 def __init__(self, nParticipant, from_file = False):
     """Initialize per-participant feature-vector storage.

     :param nParticipant: number of the participant whose data is handled
     :param from_file: when True, load precomputed features from CSV
                       instead of computing them from the raw signals
     """
     self.nParticipant = nParticipant
     self.events = EventLogger()
     self.ratings = ParticipantRatings(self.nParticipant)
     self.featureVectors = {}  # {1: {'Fp1_theta': 2453476, 'Fp1_slow_alpha': 482418, ..., 'avgSkinRes': 69}}
     self.featureDF = pd.DataFrame  # placeholder; filled by convertFeatureVectorsToDataFrame()
     self.Y = {}
     for trial in range(1, 41):  # trials are numbered 1..40 -- presumably fixed by the dataset, confirm
         self.featureVectors[trial] = {}
         self.Y[trial] = 0
     #  seed and variables for splitting data
     self.randomSeed = random.randint(1, 1000000)
     self.X_train = {}
     self.X_test = {}
     self.X_validation = {}
     self.Y_train = {}
     self.Y_test = {}
     self.Y_validation = {}
     #  if we do not use precomputed features from a file - compute them from raw signals
     if not from_file:
         # BUGFIX: removed a redundant second ParticipantRatings(...) call;
         # self.ratings is already constructed unconditionally above.
         self.physSignalsFeatures = ParticipantSignalsFeatures(self.nParticipant)
         self.physSignalsFeatures.computeFeatures(range(1, 41), range(1, 33), BANDS, FREQ, 0, 8063,
                                                  ASYM_ELECTRODE_PAIRS, ASYM_BANDS)
     else:
         self.loadFeatureVectorsFromCSV()
         self.convertFeatureVectorsToDataFrame()
示例#2
0
 def initPanel(self):
     """Build the stats panels declared in the config and wire their handlers.

     Starts the stats collector, then creates one panel per entry in the
     config's 'panelDefs' list, registering event/interval/key handlers
     according to each definition's 'channel' type.
     """
     _logger.info('initPanel')
     self.addHandler()
     g_statsCollector.eventHandlers += self.onEvent
     g_statsCollector.start()
     g_statsCollector.updateArenaInfo()
     clientStatus = g_statsCollector.clientStatus
     self.__panels = []
     self.__keyHandlers = {}
     for paneldef in self.__config.get('panelDefs', []):
         if paneldef['channel'] == 'indicator':
             panel = StatsIndicator(paneldef, clientStatus)
             # Event-driven panels refresh on events; others on the interval timer.
             if 'events' in paneldef:
                 self.__eventHandlers += panel.onEvent
             else:
                 self.__intervalHandlers += panel.update
             if 'toggleKey' in paneldef['style']:
                 keyName = paneldef['style']['toggleKey']
                 keyId = getattr(Keys, keyName)
                 if keyId not in self.__keyHandlers:
                     self.__keyHandlers[keyId] = Event()
                 self.__keyHandlers[keyId] += panel.toggle
         elif paneldef['channel'] == 'status':
             panel = StatsLogger(paneldef, clientStatus)
             self.__intervalHandlers += panel.update
         elif paneldef['channel'] == 'event':
             panel = EventLogger(paneldef, clientStatus)
             self.__eventHandlers += panel.onEvent
         else:
             # BUGFIX: an unknown 'channel' previously fell through to the
             # append below, raising NameError on the first iteration or
             # re-appending the previous panel; skip such definitions.
             continue
         self.__panels.append(panel)
     session = dependency.instance(IBattleSessionProvider)
     ctrl = session.shared.crosshair
     self.changeView(ctrl.getViewID())
     self.updateScreenPosition()
     self.updateCrosshairPosition()
示例#3
0
 def __init__(self, stateDir, logDir, name):
     """Set up per-repository harvest bookkeeping files under the given directories.

     :param stateDir: directory holding id and state files
     :param logDir: directory holding the event log
     :param name: repository name, used as the file-name prefix
     """
     self._name = name
     self._logDir = logDir
     # Both directories must exist before any state/log files are created.
     for directory in (logDir, stateDir):
         ensureDirectory(directory)
     self._ids = Ids(stateDir, name)
     self._invalidIds = Ids(stateDir, name + "_invalid")
     self._state = State(stateDir, name)
     self._eventlogger = EventLogger('%s/%s.events' % (logDir, name))
     self._resetCounts()
示例#4
0
 def __init__(self, nParticipant, from_file = False):
     """Hold feature vectors (X) and target values (Y) for one participant.

     :param nParticipant: number of the participant whose data is handled
     :param from_file: when True, load precomputed features from CSV;
                       otherwise unpack them from the raw EEG data
     """
     self.nParticipant = nParticipant
     self.events = EventLogger()
     self.ratings = ParticipantRatings(self.nParticipant)
     self.featureDF = pd.DataFrame  # use special function
     # One empty feature dict and a zero target per trial (trials 1..40).
     trialNumbers = range(1, 41)
     self.featureVectors = {trialNo: {} for trialNo in trialNumbers}
     self.Y = {trialNo: 0 for trialNo in trialNumbers}
     #  seed and variables for splitting data
     self.randomSeed = random.randint(1, 1000000)
     self.X_train, self.X_test, self.X_validation = {}, {}, {}
     self.Y_train, self.Y_test, self.Y_validation = {}, {}, {}
     #  if we not use precomputed feature from file - compute it from raw signals
     if from_file:
         self.loadFeatureVectorsFromCSV()
         self.convertFeatureVectorsToDataFrame()
     else:
         self.featureVectors = self.unpackEEGDataToVectors()
示例#5
0
class ParticipantFeatureVectors:
    """Build, store, split and persist the feature vectors (X) and target
    values (Y) of a single experiment participant.

    Feature vectors are kept as ``{trial: {feature_name: value}}`` dicts,
    e.g. ``{1: {'Fp1_theta': 2453476, 'Fp1_slow_alpha': 482418, ...,
    'avgSkinRes': 69}}``.
    """

    def __init__(self, nParticipant, from_file = False):
        """
        :param nParticipant: number of the participant whose data is handled
        :param from_file: when True, load precomputed features from CSV
                          instead of computing them from the raw signals
        """
        self.nParticipant = nParticipant
        self.events = EventLogger()
        self.ratings = ParticipantRatings(self.nParticipant)
        self.featureVectors = {}  # {1: {'Fp1_theta': 2453476, 'Fp1_slow_alpha': 482418, ..., 'avgSkinRes': 69}}
        self.featureDF = pd.DataFrame  # placeholder; filled by convertFeatureVectorsToDataFrame()
        self.Y = {}
        for trial in range(1, 41):  # trials are numbered 1..40 -- presumably fixed by the dataset, confirm
            self.featureVectors[trial] = {}
            self.Y[trial] = 0
        #  seed and variables for splitting data
        self.randomSeed = random.randint(1, 1000000)
        self.X_train = {}
        self.X_test = {}
        self.X_validation = {}
        self.Y_train = {}
        self.Y_test = {}
        self.Y_validation = {}
        #  if we do not use precomputed features from a file - compute them from raw signals
        if not from_file:
            # BUGFIX: removed a redundant second ParticipantRatings(...) call;
            # self.ratings is already constructed unconditionally above.
            self.physSignalsFeatures = ParticipantSignalsFeatures(self.nParticipant)
            self.physSignalsFeatures.computeFeatures(range(1, 41), range(1, 33), BANDS, FREQ, 0, 8063,
                                                     ASYM_ELECTRODE_PAIRS, ASYM_BANDS)
        else:
            self.loadFeatureVectorsFromCSV()
            self.convertFeatureVectorsToDataFrame()

    def fillFeatureVectors(self):
        """Populate self.featureVectors with every computed feature family."""
        self.addEEGSpectralToFeatureVector()
        self.addEEGAsymetryToFeatureVector()
        self.addGSRToFeatureVector()

    def createYVector(self, yType = 'f'):
        """Fill self.Y with the chosen per-trial rating.

        :param yType: 'f' familiarity, 'a' arousal, 'v' valence, 'l' liking,
                      'd' dominance, or 'save' to build and save all of them
        """
        self.Y = {}
        if yType == 'f':
            # Familiarity may be absent for some participants.
            if self.ratings.getFamiliarity() != []:
                for trial in self.featureVectors.keys():
                    self.Y[trial] = self.ratings.familiarity.reset_index(drop=True).iloc[trial-1]
        elif yType == 'a':
            self.ratings.getArousal()
            for trial in self.featureVectors.keys():
                self.Y[trial] = self.ratings.arousal.reset_index(drop=True).iloc[trial - 1]
        elif yType == 'v':
            self.ratings.getValence()
            for trial in self.featureVectors.keys():
                self.Y[trial] = self.ratings.valence.reset_index(drop=True).iloc[trial - 1]
        elif yType == 'l':
            self.ratings.getLiking()
            for trial in self.featureVectors.keys():
                self.Y[trial] = self.ratings.liking.reset_index(drop=True).iloc[trial - 1]
        elif yType == 'd':
            self.ratings.getDominance()
            for trial in self.featureVectors.keys():
                self.Y[trial] = self.ratings.dominance.reset_index(drop=True).iloc[trial - 1]
        elif yType == 'save':
            # Loop variable renamed: 'type' shadowed the builtin.
            for subType in ['f', 'a', 'v', 'd', 'l']:
                self.createYVector(yType=subType)
                self.saveYVectorToCSV(yType=subType)
        else:
            print('No such Y vector type')

    def convertFeatureVectorsToDataFrame(self):
        """Mirror self.featureVectors into self.featureDF (one row per trial)."""
        self.featureDF = pd.DataFrame.from_dict(self.featureVectors, orient='index')

    def addEEGSpectralToFeatureVector(self):
        """Add per-electrode, per-band spectral power features ('Fp1_theta', ...)."""
        for trial in self.featureVectors.keys():
            for electrode in range(len(EEG_CHANELS)):
                for band in BANDS.keys():
                    feature_name = EEG_CHANELS[electrode] + '_' + band
                    # spectralEEGFeatures electrodes are 1-based, hence electrode+1.
                    self.featureVectors[trial][feature_name] = \
                    self.physSignalsFeatures.spectralEEGFeatures[trial][electrode+1][band]

    def addEEGAsymetryToFeatureVector(self):
        """Add left-right electrode-pair asymmetry features ('Fp1-Fp2_alpha', ...)."""
        for trial in self.featureVectors.keys():
            for electrodePair in ASYM_ELECTRODE_PAIRS:
                leftE, rightE = electrodePair
                for band in ASYM_BANDS.keys():
                    feature_name = EEG_CHANELS[leftE-1] + '-' + EEG_CHANELS[rightE-1] + '_' + band
                    # Asymmetry values are stored under the left electrode's index.
                    self.featureVectors[trial][feature_name] = \
                    self.physSignalsFeatures.spectralEEGAsymetry[trial][leftE][band]

    def addGSRToFeatureVector(self):
        """Add the average skin-resistance (GSR) feature for every trial."""
        for trial in self.featureVectors.keys():
            self.featureVectors[trial]['avgSkinRes'] = self.physSignalsFeatures.averageSkinResistance[trial]

    def randomSplitSetForTraining(self, train=70, test=30, validation=0, seed=None):
        '''
        Split self.featureVectors and self.Y in random train, test and validation parts in given proportions
        :param train: proportion of train part, default 70
        :param test: proportion of test part, default 30
        :param validation: proportion of validation part, default 0
        :param seed: seed for random, if None - self.randomSeed is used
        :return: self.X_train, self.Y_train, self.X_test, self.Y_test, self.X_validation, self.Y_validation - feature
        and target variable sets divided in the given proportions
        '''
        #  init and fill proportion variables
        self.trainPart = train
        self.testPart = test
        self.validationPart = validation

        #  get random sample from feature vector index of test proportion length
        if seed is None:
            seed = self.randomSeed
        random.seed(seed)
        # BUGFIX: random.sample() requires a sequence; on Python 3 dict.keys()
        # is a view and raises TypeError, so materialize a list first.
        train_index = list(self.featureVectors.keys())
        total = train + test + validation
        test_index = random.sample(train_index, round(len(train_index) * test / total))
        test_index.sort()  # to have an ordered index
        train_index = [item for item in train_index if item not in test_index]

        #  Not all models require a validation set, so we can skip its creation in such a case
        if validation != 0:
            validation_index = random.sample(train_index, round(len(self.featureVectors) * validation / total))
            validation_index.sort()
            train_index = [item for item in train_index if item not in validation_index]

        #  create dicts by the created index
        self.X_train = {key: self.featureVectors[key] for key in train_index}
        try:
            self.Y_train = {key: self.Y[key] for key in train_index}
        except KeyError:
            errorMsg = 'Participant {} self.Y is empty, so no data for {}'.format(str(self.nParticipant),
                                                                                  'self.Y_train')
            self.events.addEvent(204, errorMsg)
            print(errorMsg)
        self.X_test = {key: self.featureVectors[key] for key in test_index}
        try:
            self.Y_test = {key: self.Y[key] for key in test_index}
        except KeyError:
            # BUGFIX: the message previously named 'self.Y_text'.
            errorMsg = 'Participant {} self.Y is empty, so no data for {}'.format(str(self.nParticipant), 'self.Y_test')
            self.events.addEvent(204, errorMsg)
            print(errorMsg)
        if validation != 0:
            self.X_validation = {key: self.featureVectors[key] for key in validation_index}
            try:
                self.Y_validation = {key: self.Y[key] for key in validation_index}
            except KeyError:
                errorMsg = 'Participant {} self.Y is empty, so no data for {}'.format(str(self.nParticipant),
                                                                                      'self.Y_validation')
                self.events.addEvent(204, errorMsg)
                print(errorMsg)
        return self.X_train, self.Y_train, self.X_test, self.Y_test, self.X_validation, self.Y_validation

    def saveSplitedSetToCSV(self, seed=None):
        """Save each non-empty split to CSV under a directory that encodes the
        seed and split proportions (training_data/seed=...&train=...&.../)."""
        if seed is None:
            seed = self.randomSeed
        names = ['X_train', 'X_test', 'X_validation', 'Y_train', 'Y_test', 'Y_validation']
        sets = [self.X_train, self.X_test, self.X_validation, self.Y_train, self.Y_test, self.Y_validation]
        pathname = 'training_data/seed={}&train={}&test={}&val={}/'.format(str(seed), str(self.trainPart),
                                                                           str(self.testPart), str(self.validationPart))
        if not os.path.isdir(pathname):
            os.makedirs(pathname)
        # Loop variable renamed: 'set' shadowed the builtin.
        for name, dataSet in zip(names, sets):
            if dataSet != {}:
                file_name = '{1}_{2}_{0}.csv'.format(str(seed), str(self.nParticipant), name)
                pd.DataFrame.from_dict(dataSet, orient='index').to_csv(pathname+file_name)

    def saveFeatureVectorToCSV(self):
        """Persist self.featureVectors to feature_vectors/FV<participant>.csv."""
        filename = 'feature_vectors/FV{}.csv'.format(str(self.nParticipant))
        pd.DataFrame.from_dict(self.featureVectors, orient='index').to_csv(filename)

    def saveYVectorToCSV(self, yType):
        """Persist self.Y to feature_vectors/YV<participant><yType>.csv."""
        filename = 'feature_vectors/YV'+str(self.nParticipant)+yType+'.csv'
        pd.DataFrame.from_dict(self.Y, orient='index').to_csv(filename)

    #  in most cases we do not need to recalculate features from data, so it's necessary to load the previously
    #  computed (and saved to *.csv) featureVectors
    def loadFeatureVectorsFromCSV(self):
        """Load feature vectors saved by saveFeatureVectorToCSV back into self.featureVectors."""
        filename = 'feature_vectors/FV{}.csv'.format(str(self.nParticipant))
        # BUGFIX: pd.DataFrame.from_csv was removed in pandas 1.0;
        # read_csv(..., index_col=0) is the documented replacement.
        self.featureVectors = pd.read_csv(filename, index_col=0).to_dict(orient='index')
示例#6
0
class HarvesterLog(object):
    """Tracks the state of harvesting one repository: uploaded/invalid ids,
    per-run counters, timestamps and an event log."""

    def __init__(self, stateDir, logDir, name):
        """
        :param stateDir: directory for the id and state files
        :param logDir: directory for the event log and invalid-data messages
        :param name: repository name, used as file-name prefix
        """
        self._name = name
        self._logDir = logDir
        ensureDirectory(logDir)
        ensureDirectory(stateDir)
        self._ids = Ids(stateDir, name)
        self._invalidIds = Ids(stateDir, name + "_invalid")
        self._state = State(stateDir, name)
        self._eventlogger = EventLogger(logDir + '/' + name +'.events')
        self._resetCounts()

    def isCurrentDay(self, date):
        """Return True if `date` ('YYYY-MM-DDT...') is on the same day as the state's recorded time."""
        return date.split('T')[0] == self._state.getTime().split()[0]

    def startRepository(self):
        """Reset the per-run counters and mark a new run as started."""
        self._resetCounts()
        self._state.markStarted()

    def _resetCounts(self):
        # Per-run counters; overall totals come from the Ids collections.
        self._harvestedCount = 0
        self._uploadedCount = 0
        self._deletedCount = 0

    def totalIds(self):
        return len(self._ids)

    def totalInvalidIds(self):
        return len(self._invalidIds)

    def eventLogger(self):
        # Should be removed, but is still used in Harvester.
        return self._eventlogger

    def markDeleted(self):
        """Clear all known ids and record the deletion in state and event log."""
        self._ids.clear()
        self._state.markDeleted()
        self._eventlogger.logSuccess('Harvested/Uploaded/Deleted/Total: 0/0/0/0, Done: Deleted all ids.', id=self._name)

    def endRepository(self, token, responseDate):
        """Record a successful run with its resumption token and response date."""
        self._state.markHarvested(self.countsSummary(), token, responseDate)
        self._eventlogger.logSuccess('Harvested/Uploaded/Deleted/Total: %s, ResumptionToken: %s' % (self.countsSummary(), token), id=self._name)

    def endWithException(self, exType, exValue, exTb):
        """Record a failed run: mark the state and log the formatted traceback."""
        self._state.markException(exType, exValue, self.countsSummary())
        # Collapse the multi-line traceback into a single '|'-separated line.
        error = '|'.join(s.strip() for s in traceback.format_exception(exType, exValue, exTb))
        self._eventlogger.logError(error, id=self._name)

    def countsSummary(self):
        """Return the 'harvested/uploaded/deleted/total' counter string."""
        return '%d/%d/%d/%d' % (self._harvestedCount, self._uploadedCount, self._deletedCount, self.totalIds())

    def close(self):
        """Close all underlying persistent resources."""
        self._eventlogger.close()
        self._ids.close()
        self._invalidIds.close()
        self._state.close()

    def notifyHarvestedRecord(self, uploadid):
        # A successfully harvested record is no longer invalid.
        self._removeFromInvalidData(uploadid)
        self._harvestedCount += 1

    def uploadIdentifier(self, uploadid):
        self._ids.add(uploadid)
        self._uploadedCount += 1

    def deleteIdentifier(self, uploadid):
        self._ids.remove(uploadid)
        self._deletedCount += 1

    def logInvalidData(self, uploadid, message):
        """Register `uploadid` as invalid and persist its error message to a file."""
        self._invalidIds.add(uploadid)
        filePath = self._invalidDataMessageFilePath(uploadid)
        ensureDirectory(dirname(filePath))
        # BUGFIX: use a context manager so the file handle is closed
        # deterministically (was: open(filePath, 'w').write(message)).
        with open(filePath, 'w') as f:
            f.write(message)

    def logIgnoredIdentifierWarning(self, uploadid):
        self._eventlogger.logWarning('IGNORED', uploadid)

    def clearInvalidData(self, repositoryId):
        """Drop all invalid ids belonging to `repositoryId` and remove their message tree."""
        # Loop variable renamed: 'id' shadowed the builtin.
        for invalidId in list(self._invalidIds):
            if invalidId.startswith("%s:" % repositoryId):
                self._invalidIds.remove(invalidId)
        rmtree(join(self._logDir, INVALID_DATA_MESSAGES_DIR, repositoryId))

    def hasWork(self, continuousInterval=None):
        """Return whether a new harvest run is due.

        With `continuousInterval` (seconds): True when no 'from' time is known
        or more than the interval has elapsed since it. Without it: True when
        a resumption token is pending, no 'from' time is known, or the last
        run was not today.
        """
        if continuousInterval is not None:
            from_ = self._state.from_
            # Date-only values get a midnight-UTC time so ZuluTime can parse them.
            if from_ and 'T' not in from_:
                from_ += "T00:00:00Z"
            return from_ is None or ZuluTime().epoch - ZuluTime(from_).epoch > continuousInterval
        return self._state.token or self._state.from_ is None or not self.isCurrentDay(self._state.from_)

    def state(self):
        return self._state

    def invalidIds(self):
        return list(self._invalidIds)

    def _removeFromInvalidData(self, uploadid):
        # Remove both the invalid-id entry and its stored message file, if any.
        self._invalidIds.remove(uploadid)
        invalidDataMessageFilePath = self._invalidDataMessageFilePath(uploadid)
        if isfile(invalidDataMessageFilePath):
            remove(invalidDataMessageFilePath)

    def _invalidDataMessageFilePath(self, uploadid):
        # uploadid has the form 'repositoryId:recordId'.
        repositoryId, recordId = uploadid.split(":", 1)
        return join(self._logDir, INVALID_DATA_MESSAGES_DIR, escapeFilename(repositoryId), escapeFilename(recordId))