Example #1
 def makeDecision(self, config, save=True):
     """
     Function making the decision upon the predicted probas from analyze
     method.
     Each file is dealt with as a Recording object, created, loaded,
     decisions are made, and saved again.
     See Recording.makeDecision for more info.
     """
     if self._verbatim > 0:
         print(
             '\n\n *** DATASET ANALYSIS: MAKING DECISION ON PREDICTIONS ***'
         )
     tStartGlobal = time.time()
     for file_path in self.files:
         recording = Recording(file_path, config, verbatim=self._verbatim)
         recording.load(config)
         tStart = time.time()
         recording.makeDecision(config)
         tEnd = time.time()
         if save:
             recording.save(config)
         if self._verbatim > 1:
             print(
                 '\tRecording has been re-analyzed: decisions on predictions have been made',
                 file_path, tEnd - tStart)
     tEndGlobal = time.time()
     if self._verbatim > 0:
         print(
             'Dataset has been re-analyzed: decisions on predictions have been made',
             tEndGlobal - tStartGlobal)
     return
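
The method follows a simple load / process / save loop with two verbosity levels and both per-file and whole-run timing. A self-contained sketch of that generic pattern (the names here are illustrative, not from the original class):

    import time

    def process_dataset(files, process, verbatim=1):
        """Sketch: time each file and the whole run, as makeDecision does."""
        t_start_global = time.time()
        for file_path in files:
            t_start = time.time()
            process(file_path)  # stands in for load + makeDecision + save
            if verbatim > 1:
                print('\tProcessed', file_path, time.time() - t_start)
        if verbatim > 0:
            print('Dataset processed', time.time() - t_start_global)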
Example #2
 def display(self,
             config,
             onlineDisplay=False,
             saveDisplay=True,
             forChecking=False,
             labelEncoder=None):
     """
     Function displaying all files of the dataset.
     Each file is dealt with as a Recording object, created and displayed.
     See Recording.analyze for more info.
     labelEncoder is only needed if forChecking.
     forChecking creates a file per observation and sort them in class by
     class folders that the expert can then review.
     Check README for more information.
     """
     if self._verbatim > 0:
         print('\n\n *** DATASET DISPLAY ***')
     tStartGlobal = time.time()
     for file_path in self.files:
         recording = Recording(file_path, config, verbatim=self._verbatim)
         recording.load(config)
         tStart = time.time()
         recording.display(config,
                           onlineDisplay=onlineDisplay,
                           saveDisplay=saveDisplay,
                           forChecking=forChecking,
                           labelEncoder=labelEncoder)
         tEnd = time.time()
         if self._verbatim > 1:
             print('\tRecording has been loaded and displayed', file_path,
                   tEnd - tStart)
     tEndGlobal = time.time()
     if self._verbatim > 0:
         print('Dataset has been displayed', tEndGlobal - tStartGlobal)
     return
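
Since labelEncoder is only needed when forChecking is True, a call might look like the sketch below. The `dataset` object and the class names are assumptions, and `LabelEncoder` is taken to be scikit-learn's (the snippet never names the library):

    from sklearn.preprocessing import LabelEncoder

    labelEncoder = LabelEncoder()
    labelEncoder.fit(['noise', 'event_a', 'event_b'])  # hypothetical class names
    # Write one file per observation into per-class folders for expert review:
    dataset.display(config, onlineDisplay=False, saveDisplay=True,
                    forChecking=True, labelEncoder=labelEncoder)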
Example #3
    def load(self):
        treestr = self.level*TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)
        # collect recording names: 1st char of each name must be a digit, that's all:
        rnames = [ name for name in os.listdir(self.path)
                   if os.path.isdir(os.path.join(self.path, name))
                   and name[0].isdigit() ]
        rnames.sort() # alphabetical order
        dt = 0 # calculate total track duration by summing durations of all recordings
        for rname in rnames:
            path = os.path.join(self.path, rname)
            recording = Recording(path, track=self)
            recording.load()
            self.r[recording.id] = recording
            setattr(self, 'r' + str(recording.id), recording) # add shortcut attrib
            dt += recording.dt
        self.rnames = rnames # easy way to print out all recording names
        self.dt = dt
        self.dtsec = self.dt / 1e6
        self.dtmin = self.dtsec / 60
        self.dthour = self.dtmin / 60

        # create a TrackSort with TrackNeurons:
        self.sort = TrackSort(self)
        self.sort.load()
        # one way of calculating self.trange:
        #tranges = np.asarray([ n.trange for n in self.alln.values() ])
        #self.trange = min(tranges[:, 0]), max(tranges[:, 1])
        # better way of calculating self.trange:
        rids = sorted(self.r.keys()) # all recording ids in self
        r0 = self.r[rids[0]]
        r1 = self.r[rids[-1]]
        assert r0.datetime == self.datetime
        self.trange = r0.td+r0.trange[0], r1.td+r1.trange[1]

        self.calc_meanrates()

        # pttype better be the same for all member recordings:
        pttype = self.r[rids[0]].pttype # init to pttype of first recording
        for rid in rids[1:]:
            r = self.r[rid]
            # if recording doesn't have a pttype, it's probably from an old .spk file,
            # so don't bother doing this test:
            if hasattr(r, 'pttype') and pttype != r.pttype:
                raise ValueError("inconsistent polytrode types %r and %r in track %s"
                                 % (pttype, r.pttype, self.id))
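
The `setattr` call above gives each recording a shortcut attribute such as `self.r71` alongside the `self.r` dict. A minimal self-contained sketch of that pattern:

    class Container:
        """Sketch: children reachable both via a dict and as attributes."""
        def __init__(self):
            self.r = {}

        def add(self, rid, obj):
            self.r[rid] = obj
            setattr(self, 'r' + str(rid), obj)  # shortcut attrib, e.g. self.r71

    c = Container()
    c.add(71, 'recording 71')
    assert c.r[71] is c.r71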
Example #4
    def load(self):
        treestr = self.level * TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)
        dirnames = [
            name for name in os.listdir(self.path)
            if os.path.isdir(os.path.join(self.path, name))
        ]
        # collect recording names: either the 1st char of each name must be a digit,
        # or the last _ separated field must be an 'e' (for 'experiment') followed by a number:
        rnames = []
        for dirname in dirnames:
            if dirname[0].isdigit():
                rnames.append(dirname)
            else:
                lastfield = dirname.split('_')[-1]
                if lastfield[0] == 'e' and lastfield[1:].isnumeric():
                    rnames.append(dirname)
        rnames.sort()  # alphabetical order
        dt = 0  # calculate total track duration by summing durations of all recordings
        # does this track have any missing sorts, or rely on old impoverished .spk files?:
        missingsort, simplesort = False, False
        for rname in rnames:
            path = os.path.join(self.path, rname)
            recording = Recording(path, track=self)
            recording.load()
            if recording.sort is None:
                missingsort = True
            elif type(recording.sort.header) in [core.SPKHeader, core.MATHeader]:
                simplesort = True
            self.r[recording.id] = recording
            setattr(self, 'r' + str(recording.id), recording)  # add shortcut attrib
            dt += recording.dt
        self.rnames = rnames  # easy way to print out all recording names
        self.dt = dt
        self.dtsec = self.dt / 1e6
        self.dtmin = self.dtsec / 60
        self.dthour = self.dtmin / 60

        if len(rnames) == 0:
            return  # no recordings in this track, nothing else to do

        if missingsort or simplesort:
            return  # skip all below due to missing or impoverished sort files (.mat or .spk)

        # create a TrackSort with TrackNeurons:
        self.sort = TrackSort(self)
        self.sort.load()
        # load RF type for each cell, should be one big dict indexed by nid:
        rftypefname = os.path.join(self.path, self.absname + '.rftype')
        try:
            with open(rftypefname, 'r') as f:
                rftypestr = f.read()
            rftypes = eval(rftypestr)
            for nid, rftype in rftypes.items():
                assert rftype in ['simple', 'complex', 'LGN', None]
                self.alln[nid].rftype = rftype
        except IOError:  # no absname.rftype file denoting RF type of each cell
            pass
        # load spike type for each cell, should be one big dict indexed by nid:
        spiketypefname = os.path.join(self.path, self.absname + '.spiketype')
        try:
            with open(spiketypefname, 'r') as f:
                spiketypestr = f.read()
            spiketypes = eval(spiketypestr)
            for nid, spiketype in spiketypes.items():
                assert spiketype in ['fast', 'slow', 'fastasym', 'slowasym']
                self.alln[nid].spiketype = spiketype
        except IOError:  # no absname.spiketype file denoting spike type of each cell
            pass

        # calculate tranges, representing start and stop times (us) of child recordings
        # relative to start of track:
        rids = sorted(self.r)  # all recording ids in self
        r0 = self.r[rids[0]]
        assert r0.datetime == self.datetime
        tranges = []
        for rid in rids:
            rec = self.r[rid]
            # rec.td is time delta (us) between start of track and start of recording
            trange = rec.td + rec.trange[0], rec.td + rec.trange[1]
            tranges.append(trange)

        self.tranges = np.array(tranges)  # each row is a recording trange
        self.trange = self.tranges[0, 0], self.tranges[-1, 1]

        self.calc_meanrates()

        # pttype better be the same for all member recordings:
        pttype = self.r[rids[0]].pttype  # init to pttype of first recording
        for rid in rids[1:]:
            r = self.r[rid]
            # if recording doesn't have a pttype, it's probably from an old .spk file,
            # so don't bother doing this test:
            if hasattr(r, 'pttype') and pttype != r.pttype:
                raise ValueError(
                    "inconsistent polytrode types %r and %r in track %s" %
                    (pttype, r.pttype, self.id))
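
The `.rftype` and `.spiketype` files are parsed with `eval`, which will execute arbitrary code found in the file. If those files only ever hold a literal dict (e.g. `{1: 'simple', 2: None}`), `ast.literal_eval` is a stricter drop-in; a sketch under that assumption, with a hypothetical `load_typedict` helper:

    import ast

    def load_typedict(fname):
        """Sketch: parse a literal dict from fname, {} if the file is missing."""
        try:
            with open(fname, 'r') as f:
                return ast.literal_eval(f.read())  # rejects anything but literals
        except IOError:  # no such file for this track
            return {}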
Example #5
    def load(self):
        treestr = self.level*TAB + self.name + '/'
        # print string to tree hierarchy and screen
        self.writetree(treestr + '\n')
        print(treestr)
        # collect recording names: 1st char of each name must be a digit, that's all:
        rnames = [ name for name in os.listdir(self.path)
                   if os.path.isdir(os.path.join(self.path, name))
                   and name[0].isdigit() ]
        rnames.sort() # alphabetical order
        dt = 0 # calculate total track duration by summing durations of all recordings
        # does this track have any missing sorts, or rely on old impoverished .spk files?:
        missingsort, spksort = False, False
        for rname in rnames:
            path = os.path.join(self.path, rname)
            recording = Recording(path, track=self)
            recording.load()
            if recording.sort is None:
                missingsort = True
            elif type(recording.sort.header) is core.SPKHeader:
                spksort = True
            self.r[recording.id] = recording
            setattr(self, 'r' + str(recording.id), recording) # add shortcut attrib
            dt += recording.dt
        self.rnames = rnames # easy way to print out all recording names
        self.dt = dt
        self.dtsec = self.dt / 1e6
        self.dtmin = self.dtsec / 60
        self.dthour = self.dtmin / 60

        if len(rnames) == 0:
            return # no recordings in this track, nothing else to do

        if missingsort or spksort:
            return # skip all below due to missing .ptcs or use of impoverished .spk files

        # create a TrackSort with TrackNeurons:
        self.sort = TrackSort(self)
        self.sort.load()
        # load RF type for each cell, should be one big dict indexed by nid:
        rftypefname = os.path.join(self.path, self.absname + '.rftype')
        try:
            with open(rftypefname, 'r') as f:
                rftypestr = f.read()
            rftypes = eval(rftypestr)
            for nid, rftype in rftypes.items():
                assert rftype in ['simple', 'complex', 'LGN', None]
                self.alln[nid].rftype = rftype
        except IOError: # no absname.rftype file denoting RF type of each cell
            pass
        # load spike type for each cell, should be one big dict indexed by nid:
        spiketypefname = os.path.join(self.path, self.absname + '.spiketype')
        try:
            with open(spiketypefname, 'r') as f:
                spiketypestr = f.read()
            spiketypes = eval(spiketypestr)
            for nid, spiketype in spiketypes.items():
                assert spiketype in ['fast', 'slow', 'fastasym', 'slowasym']
                self.alln[nid].spiketype = spiketype
        except IOError: # no absname.spiketype file denoting spike type of each cell
            pass

        # calculate tranges, representing start and stop times (us) of child recordings
        # relative to start of track:
        rids = sorted(self.r.keys()) # all recording ids in self
        r0 = self.r[rids[0]]
        assert r0.datetime == self.datetime
        tranges = []
        for rid in rids:
            rec = self.r[rid]
            # rec.td is time delta (us) between start of track and start of recording
            trange = rec.td+rec.trange[0], rec.td+rec.trange[1]
            tranges.append(trange)

        self.tranges = np.array(tranges) # each row is a recording trange
        self.trange = self.tranges[0, 0], self.tranges[-1, 1]

        self.calc_meanrates()

        # pttype better be the same for all member recordings:
        pttype = self.r[rids[0]].pttype # init to pttype of first recording
        for rid in rids[1:]:
            r = self.r[rid]
            # if recording doesn't have a pttype, it's probably from an old .spk file,
            # so don't bother doing this test:
            if hasattr(r, 'pttype') and pttype != r.pttype:
                raise ValueError("inconsistent polytrode types %r and %r in track %s"
                                 % (pttype, r.pttype, self.id))
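
The trange bookkeeping stacks one (start, stop) row per recording, each offset by that recording's td, then takes the first start and last stop as the track span. A self-contained sketch with made-up values:

    import numpy as np

    tds = [0, 5_000_000, 12_000_000]  # us from track start (made-up values)
    local = [(0, 4_000_000), (0, 6_000_000), (0, 3_000_000)]  # per-recording tranges
    tranges = np.array([(td + t0, td + t1) for td, (t0, t1) in zip(tds, local)])
    trange = tranges[0, 0], tranges[-1, 1]  # track span: first start, last stop
    assert trange == (0, 15_000_000)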