# Shared imports for the snippets below; BDF, GeomCallable, RFStream, RFTrace,
# read_hdf5_header, and get_attb_from_atti are provided by the host projects.
from typing import Callable, Dict
import fnmatch

import numpy as np
import h5py


def _read_table1(name: str, group: h5py._hl.group.Group, geom_model: BDF,
                 add_table1: Callable) -> None:
    identity = group.get('IDENTITY')
    TID = identity['ID']
    CODEX = identity['CODEX']
    CODEY = identity['CODEY']
    POS = identity['POS']
    LEN = identity['LEN']
    DOMAIN_ID = identity['DOMAIN_ID']
    axis_map = {
        0: 'LINEAR',
    }
    xy = group.get('XY')
    X = xy['X']
    Y = xy['Y']
    for tid, codex, codey, pos, leni in zip(TID, CODEX, CODEY, POS, LEN):
        if tid >= 100_000_000:
            continue
        x = X[pos:pos + leni]
        y = Y[pos:pos + leni]
        obj = add_table1(tid, x, y, xaxis=axis_map[codex], yaxis=axis_map[codey],
                         extrap=0, comment='')
        obj.validate()
        str(obj)
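# A minimal usage sketch (the HDF5 group path is an assumption based on the
# MSC Nastran input tree): the caller passes the BDF add-method matching the
# table family, e.g. add_tabled1 for TABLED1 cards.
def _example_read_tabled1(h5_filename: str, geom_model: BDF) -> None:
    with h5py.File(h5_filename, 'r') as h5_file:
        group = h5_file['/NASTRAN/INPUT/TABLE/TABLED1']  # assumed layout
        _read_table1('TABLED1', group, geom_model, geom_model.add_tabled1)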
def convertSpikeData(self, hdf5_tetrode_data: h5py._hl.group.Group):
    """
    Does the spike conversion from OE Spike Sorter format to Axona format
    tetrode files

    Parameters
    ----------
    hdf5_tetrode_data : h5py._hl.group.Group
        This kind of looks like a dictionary and can, it seems, be treated
        as one more or less (see http://docs.h5py.org/en/stable/high/group.html).
    """
    # First let's get the datatype for tetrode files as this will be the
    # same for all tetrodes...
    dt = self.AxonaData.axona_files['.1']
    # ... and a basic header for the tetrode file that we use for each
    # tetrode file, changing only the num_spikes value
    header = self.AxonaData.getEmptyHeader("tetrode")
    header['duration'] = str(int(self.last_pos_ts - self.first_pos_ts))
    header['sw_version'] = '1.1.0'
    header['num_chans'] = '4'
    header['timebase'] = '96000 hz'
    header['bytes_per_timestamp'] = '4'
    header['samples_per_spike'] = '50'
    header['sample_rate'] = '48000 hz'
    header['bytes_per_sample'] = '1'
    header['spike_format'] = 't,ch1,t,ch2,t,ch3,t,ch4'

    for key in hdf5_tetrode_data.keys():
        spiking_data = np.array(hdf5_tetrode_data[key].get('data'))
        timestamps = np.array(hdf5_tetrode_data[key].get('timestamps'))
        # Check whether any of the spiking data was captured before/after the
        # first/last bit of position data; if so, discard it as we
        # potentially have no valid position to align the spike to :(
        idx = np.logical_or(timestamps < self.first_pos_ts,
                            timestamps > self.last_pos_ts)
        spiking_data = spiking_data[~idx, :, :]
        timestamps = timestamps[~idx]
        # Subtract the first pos timestamp from the spiking timestamps
        timestamps = timestamps - self.first_pos_ts
        # Get the number of spikes here for use below in the header
        num_spikes = len(timestamps)
        # Repeat the timestamps in tetrode multiples ready for Axona export
        new_timestamps = np.repeat(timestamps, 4)
        new_spiking_data = spiking_data.astype(np.float64)
        # Convert to microvolts...
        new_spiking_data = new_spiking_data * self.bitvolts
        # ... and upsample the spikes...
        new_spiking_data = self.resample(new_spiking_data, 4, 5, -1)
        # ... and scale appropriately for Axona, inverting as OE seems to be
        # inverted wrt Axona
        new_spiking_data = new_spiking_data / (self.hp_gain / 4 / 128.0) * (-1)
        # ... scale them to the gains specified somewhere (not sure where /
        # how to do this yet)
        shp = new_spiking_data.shape
        # Then reshape them as Axona wants them a bit differently
        new_spiking_data = np.reshape(new_spiking_data,
                                      [shp[0] * shp[1], shp[2]])
        # Cap any values outside the range of int8
        new_spiking_data[new_spiking_data < -128] = -128
        new_spiking_data[new_spiking_data > 127] = 127
        # Create the new array
        new_tetrode_data = np.zeros(len(new_timestamps), dtype=dt)
        new_tetrode_data['ts'] = new_timestamps * 96000
        new_tetrode_data['waveform'] = new_spiking_data
        # Change the header num_spikes field
        header['num_spikes'] = str(num_spikes)
        i_tetnum = key.split('electrode')[1]
        print("Exporting tetrode {}".format(i_tetnum))
        self.writeTetrodeData(i_tetnum, header, new_tetrode_data)
def all_traces_recursive(group: h5py._hl.group.Group, stream: RFStream,
                         pattern: str) -> RFStream:
    """
    Recursively appends all traces in an h5py group to the input stream.
    In addition, this will check whether the data matches a certain pattern.

    :param group: group to search through
    :type group: class:`h5py._hl.group.Group`
    :param stream: Stream to append the traces to
    :type stream: RFStream
    :param pattern: pattern for the path in the hdf5 file, see fnmatch for
        details.
    :type pattern: str
    :return: Stream with appended traces
    :rtype: RFStream
    """
    for v in group.values():
        if isinstance(v, h5py._hl.group.Group):
            all_traces_recursive(v, stream, pattern)
        elif not fnmatch.fnmatch(v.name, pattern) and v.name not in pattern:
            continue
        else:
            # try:
            stream.append(RFTrace(np.array(v), header=read_hdf5_header(v)))
            # except ValueError:
            #     warnings.warn(
            #         'Header could not be converted. Attributes are: %s' % (
            #             str(v.attrs)))
    return stream
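# A minimal usage sketch (the file name and wildcard pattern are assumptions,
# and RFStream is assumed to be constructible empty like an obspy Stream):
# gather every trace whose full HDF5 path matches an fnmatch-style pattern.
def _example_collect_traces(h5_filename: str) -> RFStream:
    with h5py.File(h5_filename, 'r') as h5_file:
        return all_traces_recursive(h5_file['/'], RFStream(), '*XX.STA01*')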
def read_pbarl(name: str, group: h5py._hl.group.Group, geom_model: BDF):
    # ('PID', 'MID', 'GROUP', 'TYPE', 'INFO_POS', 'INFO_LEN', 'DOMAIN_ID')
    IDENTITY = group['IDENTITY']
    INFO = group['INFO']  # ('VALUE',)
    VALUE = INFO['VALUE']
    PID = IDENTITY['PID']
    MID = IDENTITY['MID']
    GROUP = IDENTITY['GROUP']
    TYPE = IDENTITY['TYPE']
    INFO_POS = IDENTITY['INFO_POS']
    INFO_LEN = IDENTITY['INFO_LEN']
    DOMAIN_ID = IDENTITY['DOMAIN_ID']
    properties = geom_model.properties
    for pid, mid, group, bar_type, ipos, ilen in zip(PID, MID, GROUP, TYPE,
                                                     INFO_POS, INFO_LEN):
        if pid in properties and properties[pid].type == 'PBAR':
            del properties[pid]
        group_str = group.strip().decode('latin1')
        bar_type_str = bar_type.strip().decode('latin1')
        # the last entry of this property's VALUE slice is the
        # non-structural mass, so index it relative to ipos
        dim = VALUE[ipos:ipos + ilen - 1].tolist()
        nsm = VALUE[ipos + ilen - 1]
        obj = geom_model.add_pbarl(pid, mid, bar_type_str, dim,
                                   group=group_str, nsm=nsm, comment='')
        obj.validate()
        str(obj)
def read_dresp1(name: str, group: h5py._hl.group.Group, geom_model: BDF) -> None:
    # TODO: group, not dataset
    # {'ATTI': None, 'IDENTITY': None}

    # ('ID', 'LABEL', 'RTYPE', 'PTYPE', 'RPSID', 'NTUSED', 'AFPMID', 'REGION',
    #  'ATTA', 'ATTBI', 'ATTBR', 'ATTI_LEN', 'ATTI_POS', 'DOMAIN_ID')
    IDENTITY = group.get('IDENTITY')
    ID = IDENTITY['ID']
    LABEL = IDENTITY['LABEL']
    RTYPE = IDENTITY['RTYPE']
    PTYPE = IDENTITY['PTYPE']
    RPSID = IDENTITY['RPSID']
    NTUSED = IDENTITY['NTUSED']
    AFPMID = IDENTITY['AFPMID']
    REGION = IDENTITY['REGION']
    ATTA = IDENTITY['ATTA']
    ATTBI = IDENTITY['ATTBI']
    ATTBR = IDENTITY['ATTBR']
    ATTI_LEN = IDENTITY['ATTI_LEN']
    ATTI_POS = IDENTITY['ATTI_POS']
    DOMAIN_ID = IDENTITY['DOMAIN_ID']

    # ('ATTI',)
    ATTI_group = group.get('ATTI')
    ATTI = ATTI_group['ATTI']
    for (idi, label, rtype, ptype, rpsid, ntused, afpmid, region,
         atta, attbi, attbr, ilen, ipos) in zip(
            ID, LABEL, RTYPE, PTYPE, RPSID, NTUSED, AFPMID, REGION,
            ATTA, ATTBI, ATTBR, ATTI_LEN, ATTI_POS):
        # rpsid
        # ntused
        # attbi/r
        label_str = label.strip().decode('latin1')
        property_type = ptype.strip().decode('latin1')
        response_type = rtype.strip().decode('latin1')
        if property_type == '':
            property_type = None
        atta, attb = get_attb_from_atti(response_type, atta, attbi, attbr)
        atti = ATTI[ipos:ipos + ilen].tolist()
        obj = geom_model.add_dresp1(idi, label_str, response_type,
                                    property_type, region, atta, attb, atti,
                                    validate=True, comment='')
        assert afpmid == 0, afpmid
        obj.validate()
        str(obj)
def _read_table2(name: str, group: h5py._hl.group.Group, geom_model: BDF,
                 add_table2: Callable):
    # identity = ('ID', 'X1', 'POS', 'LEN', 'DOMAIN_ID')
    identity = group.get('IDENTITY')
    TID = identity['ID']
    X1 = identity['X1']
    POS = identity['POS']
    LEN = identity['LEN']
    DOMAIN_ID = identity['DOMAIN_ID']
    xy = group.get('XY')
    X = xy['X']
    Y = xy['Y']
    for tid, x1, pos, leni in zip(TID, X1, POS, LEN):
        if tid >= 100_000_000:
            continue
        x = X[pos:pos + leni]
        y = Y[pos:pos + leni]
        obj = add_table2(tid, x1, x, y, comment='')
        obj.validate()
        str(obj)
def load_geometry_block(node: h5py._hl.group.Group,
                        name_map: Dict[str, GeomCallable],
                        geom_model: BDF) -> None:
    assert node is not None, node
    assert isinstance(node, h5py._hl.group.Group)
    for name in list(node):
        group = node.get(name)
        assert group is not None, name
        if name in name_map:
            func = name_map[name]
            func(name, group, geom_model)
        else:
            raise RuntimeError(name)
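# A minimal usage sketch (the group path and the map contents are
# assumptions): load_geometry_block walks one geometry block and routes each
# card group to its reader, so the name map simply pairs card names with the
# reader functions defined in this file.
def _example_load_properties(h5_filename: str, geom_model: BDF) -> None:
    name_map = {
        'PBARL': read_pbarl,
        'PBEAML': read_pbeaml,
    }
    with h5py.File(h5_filename, 'r') as h5_file:
        load_geometry_block(h5_file['/NASTRAN/INPUT/PROPERTY'],  # assumed path
                            name_map, geom_model)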
def _read_table4(name: str, group: h5py._hl.group.Group, geom_model: BDF,
                 add_table4: Callable):
    # ('ID', 'X1', 'X2', 'X3', 'X4', 'POS', 'LEN', 'DOMAIN_ID')
    identity = group.get('IDENTITY')
    coef = group.get('COEF')
    TID = identity['ID']
    X1 = identity['X1']
    X2 = identity['X2']
    X3 = identity['X3']
    X4 = identity['X4']
    POS = identity['POS']
    LEN = identity['LEN']
    DOMAIN_ID = identity['DOMAIN_ID']
    A = coef['A']
    for tid, x1, x2, x3, x4, pos, leni in zip(TID, X1, X2, X3, X4, POS, LEN):
        if tid >= 100_000_000:
            continue
        a = A[pos:pos + leni]
        obj = add_table4(tid, x1, x2, x3, x4, a)
        obj.validate()
        str(obj)
def read_dvprel1(name: str, group: h5py._hl.group.Group, geom_model: BDF) -> None:
    # TODO: group, not dataset
    # {'ATTI': None, 'IDENTITY': None}

    # ('ID', 'TYPE', 'PID', 'FID', 'PMIN', 'PMAX', 'C0', 'PNAME', 'START',
    #  'LEN', 'DOMAIN_ID')
    IDENTITY = group.get('IDENTITY')
    RELATION = group.get('RELATION')  # ('DVID', 'COEF')
    DVID = RELATION['DVID']
    COEF = RELATION['COEF']
    ID = IDENTITY['ID']
    TYPE = IDENTITY['TYPE']
    PID = IDENTITY['PID']
    FID = IDENTITY['FID']
    PMIN = IDENTITY['PMIN']
    PMAX = IDENTITY['PMAX']
    C0 = IDENTITY['C0']
    PNAME = IDENTITY['PNAME']
    START = IDENTITY['START']
    LEN = IDENTITY['LEN']
    DOMAIN_ID = IDENTITY['DOMAIN_ID']
    for oid, typei, pid, fid, pmin, pmax, c0, pname, ipos, ilen in zip(
            ID, TYPE, PID, FID, PMIN, PMAX, C0, PNAME, START, LEN):
        pname_str = pname.strip().decode('latin1')
        prop_type = typei.strip().decode('latin1')
        dvids = DVID[ipos:ipos + ilen]
        coeffs = COEF[ipos:ipos + ilen]
        if pname_str:
            pname_fid = pname_str
        elif fid != 0:
            pname_fid = fid
        else:
            out = (pname_str, fid)
            raise RuntimeError(out)
        obj = geom_model.add_dvprel1(oid, prop_type, pid, pname_fid,
                                     dvids, coeffs,
                                     p_min=pmin, p_max=pmax, c0=c0,
                                     validate=True, comment='')
        obj.validate()
        str(obj)
def read_rbe2(name: str, group: h5py._hl.group.Group, geom_model: BDF) -> None:
    gm_group = group.get('GM')
    rb_group = group.get('RB')
    assert gm_group is not None, gm_group
    assert rb_group is not None, rb_group
    assert isinstance(group, h5py._hl.group.Group)
    #names = group.dtype.names
    EID = rb_group['EID']
    GN = rb_group['GN']
    CM = rb_group['CM']
    GM_POS = rb_group['GM_POS']
    GM_LEN = rb_group['GM_LEN']
    ALPHA = rb_group['ALPHA']
    DOMAIN = rb_group['DOMAIN_ID']
    GM = gm_group['ID']
    for eid, gn, cm, gm_pos, gm_len, alpha, domain in zip(
            EID, GN, CM, GM_POS, GM_LEN, ALPHA, DOMAIN):
        Gmi = GM[gm_pos:gm_pos + gm_len]
        assert len(Gmi) == gm_len
        rbe2 = geom_model.add_rbe2(eid, gn, cm, Gmi, alpha=alpha, comment='')
        rbe2.validate()
def attr_var_unit(g: h5py._hl.group.Group, str_var: str, str_unit: str, value):
    g.attrs[str_var] = value
    g.attrs[str_var + '__unit'] = np.string_(str_unit)
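# A minimal usage sketch (file, group, and attribute names are assumptions):
# store a value together with its unit as a pair of HDF5 attributes.
def _example_attr_var_unit(h5_filename: str) -> None:
    with h5py.File(h5_filename, 'w') as h5_file:
        run = h5_file.create_group('run_0')
        attr_var_unit(run, 'sample_rate', 'Hz', 48000)
        # now: run.attrs['sample_rate'] == 48000
        #      run.attrs['sample_rate__unit'] == b'Hz'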
@classmethod
def get_a_single_key(cls, group: h5py._hl.group.Group) -> str:
    keys = list(group.keys())
    if len(keys) == 1:
        return str(keys[0])
    else:
        return ''
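# A minimal usage sketch (`Reader` is a hypothetical host class, since the
# snippet only shows the classmethod itself): returns the lone child name of
# a group, or '' when the group has zero or several children.
def _example_get_a_single_key(h5_filename: str) -> str:
    with h5py.File(h5_filename, 'r') as h5_file:
        return Reader.get_a_single_key(h5_file['/'])  # hypothetical class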
def read_pbeaml(name: str, group: h5py._hl.group.Group, geom_model: BDF):
    # ('PID', 'MID', 'GROUP', 'TYPE', 'SECTION_POS', 'SECTION_LEN',
    #  'DOMAIN_ID')
    IDENTITY = group['IDENTITY']
    DIMS = group['DIMS']  # ('DIM',)
    # ('SO', 'RDIST', 'DIMS_POS', 'DIMS_LEN', 'NSM')
    SECTION = group['SECTION']
    DIM = DIMS['DIM']
    SO = SECTION['SO']
    RDIST = SECTION['RDIST']
    DIMS_POS = SECTION['DIMS_POS']
    DIMS_LEN = SECTION['DIMS_LEN']
    NSM = SECTION['NSM']
    PID = IDENTITY['PID']
    MID = IDENTITY['MID']
    GROUP = IDENTITY['GROUP']
    TYPE = IDENTITY['TYPE']
    SECTION_POS = IDENTITY['SECTION_POS']
    SECTION_LEN = IDENTITY['SECTION_LEN']
    DOMAIN_ID = IDENTITY['DOMAIN_ID']
    properties = geom_model.properties
    for pid, mid, group, beam_type, spos, slen in zip(PID, MID, GROUP, TYPE,
                                                      SECTION_POS, SECTION_LEN):
        if pid in properties and properties[pid].type == 'PBEAM':
            del properties[pid]
        group_str = group.strip().decode('latin1')
        beam_type_str = beam_type.strip().decode('latin1')

        # section position/length
        si = slice(spos, spos + slen)
        xxb = RDIST[si]
        uxxb, ixxb = np.unique(xxb, return_index=True)
        xxb2 = xxb[ixxb]
        so = SO[si][ixxb]
        nsm = NSM[si][ixxb]
        dim_pos = DIMS_POS[si][ixxb]
        dim_len = DIMS_LEN[si][ixxb]

        # dim position/length
        dims = []
        for ipos, ilen in zip(dim_pos, dim_len):
            dimsi = DIM[ipos:ipos + ilen]
            dims.append(dimsi)
        #uxxb, ixxb = np.unique(xxb, return_index=True)
        #xxb2 = xxb[ixxb]
        #dims2 = [dims[i] for i in ixxb]
        #so2 = so[ixxb]
        #nsm2 = nsm[ixxb]
        obj = geom_model.add_pbeaml(pid, mid, beam_type_str, xxb2, dims,
                                    so=so, nsm=nsm, group=group_str,
                                    comment='')
        obj.validate()
        str(obj)