def fill_event_store(self, es, sigma=0.32, window=2.0, nfuncs=5):
    """Given an event store object 'es', populate it from the
    information contained in this pre_ds data store object.  Only
    handles one tetrode at a time."""
    samprate = self.samprate
    nchannels = len(self.signal)
    samp_sigma = sigma * 0.001 * samprate
    samp_window = window * 0.001 * samprate
    es.make_meta_table(samprate, nchannels, uriunsplit(self.desc))
    # See if the default sigma (0.32 ms) and window width (2 ms)
    # will work here:
    es.make_basis_table()
    es.make_event_table()
    es.make_spiketimes_table()
    es.make_spikebasis_table()
    # es.make_channel_table()
    es.make_spikecoefs_table()
    es.make_spikelabels_table()
    es.make_default_labels()
    es.add_basis(0, samp_sigma, samp_window, nfuncs=nfuncs)
    seq = es.get_basis(0)
    zc = find_feature_peaks(self.signal[0], seq)
    last = len(zc)
    t0 = 0               # assumed: timestamps start at sample 0
    dt = 1               # assumed: one timestamp unit per element of zc
    width = samp_window  # assumed: minimum gap (in samples) between distinct spikes
    i = 0
    iprev = 0
    spike_id = -1
    prev_id = 0
    while i < last:
        if zc[i] != 0:
            ts = t0 + i * dt
            if (i - iprev) > width:
                spike_id += 1
                es.set_spike_time(spike_id, ts)
                es.set_spike_basisid(spike_id, 0)
                es.set_spike_label(spike_id, 0)
                for chan in range(nchannels):
                    w = ndk.features.get_window(self.signal[chan], ts, samp_window)
                    coefs = ndk.features.waveform_coefs(w, seq)
                    es.set_spike_coefs(spike_id, chan, coefs)
                iprev = i
        i += 1
        # Progress indicator: print every 100th spike id.
        if spike_id - prev_id >= 100:
            print('{} '.format(spike_id), end="")
            prev_id = spike_id
    for ts, tag in self.events:
        es.add_event(tag, ts)
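
# Round-trip sanity check for the uriunsplit(self.desc) call above: a minimal,
# standalone sketch using a made-up 'pre' URI (not taken from the code above).
from uritools import urisplit, uriunsplit

desc = urisplit('pre://localhost/data/session1')
assert uriunsplit(desc) == 'pre://localhost/data/session1'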
def get_redirect(url):
    # simple case: it's already in the dict
    url = absolute_from_relative_url(url)
    if url in redirects:
        return redirects[url]

    # Try looking it up without the fragment
    defrag = uritools.uridefrag(url)
    defrag_url = defrag.uri
    fragment = defrag.fragment
    if fragment and defrag_url in redirects:
        return uritools.urijoin(redirects[defrag_url], '#' + fragment)

    # Try fixing http/https to match the TOC
    url_parts = uritools.urisplit(url)
    base_url_parts = uritools.urisplit(redirects[base_url])
    fixed_scheme_url = uritools.uriunsplit(
        list(base_url_parts)[:1] + list(url_parts)[1:])
    if fixed_scheme_url in redirects:
        return redirects[fixed_scheme_url]

    # If it's on the same domain, try scraping it
    if url_parts.host == base_url_parts.host:
        try:
            print(f"Scraping url for get_redirect: {url}")
            scraper_result = scraper.scrape(
                url, wait_for_selector=config['post_body_selector'])
            redirects[url] = scraper_result['final_url']
            # TODO: Maybe store this scraped result in the book as well?
            return redirects[url]
        except (urllib.error.URLError, ssl.SSLError):
            # TODO: Could return '' or something, but for now leaving it seems fine
            return url

    # else, couldn't find it, so leave it alone
    return url
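
# Minimal, self-contained sketch of the fragment handling in get_redirect(),
# using only uritools; the URLs are made-up examples.
import uritools

defrag = uritools.uridefrag('https://example.com/page#section-2')
assert defrag.uri == 'https://example.com/page'
assert defrag.fragment == 'section-2'
# Re-attaching the fragment to a redirect target:
assert (uritools.urijoin('https://example.com/new-page', '#section-2')
        == 'https://example.com/new-page#section-2')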
def compose(scheme=None, authority=None, path=None, query=None, fragment=None, port=None):
    # uricompose() returns a complete URI string; no further uriunsplit() is needed.
    return uritools.uricompose(scheme=scheme, host=authority, port=port,
                               path=path, query=query, fragment=fragment)
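
# Example call of compose() above; the expected string assumes a uritools
# version whose uricompose() accepts host= and port= keywords, as the wrapper
# does, and standard RFC 3986 recomposition.
assert (compose(scheme='https', authority='example.com', path='/a/b',
                query='x=1', fragment='top', port=8080)
        == 'https://example.com:8080/a/b?x=1#top')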
def fill_event_store(self, es, tetrode=0):
    """Given an event store object 'es', populate it from the
    information contained in this pre_ds data store object.  Only
    handles one tetrode at a time."""
    samprate = self.info[0]['rate']
    nchannels = len(self.info) * 8
    # If we separate the tetrodes, then only use 4 channels:
    es.make_meta_table(samprate, 4, uriunsplit(self.desc))
    # See if the default sigma (0.32 ms) and window width (2 ms)
    # will work here:
    es.make_basis_table()
    es.make_event_table()
    es.make_spiketimes_table()
    es.make_spikebasis_table()
    # es.make_channel_table()
    es.make_spikecoefs_table()
    es.make_spikelabels_table()
    es.make_default_labels()
    es.add_basis(0, self.sigma, self.window, nfuncs=7, name='pre_basis')
    seq = es.get_basis(0)
    spike_id = 0
    which = int(tetrode / 2)
    tnum = tetrode % 2
    m = self.info[which]
    # Does not handle behavioral events:
    for key, value in m.items():
        # Only consider integer keys - these are data records:
        if isinstance(key, int):
            ts, tet, w = value  # 'tet' avoids shadowing the 'tetrode' argument
            if int(tet) == tnum:
                es.set_spike_time(spike_id, ts)
                es.set_spike_basisid(spike_id, 0)
                es.set_spike_label(spike_id, 0)
                for chan in range(4):
                    coefs = ndk.features.waveform_coefs(w[chan], seq)
                    es.set_spike_coefs(spike_id, chan, coefs)
                spike_id += 1
    for ts, tag in self.events:
        es.add_event(tag, ts)
def _run(self):
    started = Event()

    def start():
        log.info('Starting Jupyter server in separate thread')
        asyncio.set_event_loop_policy(AnyThreadEventLoopPolicy())
        notebook_dir = base_system_path(self._data.base, subdir=NOTEBOOKS)
        options = ['--notebook-dir', notebook_dir]
        if self._bind is not None:
            options += ['--ip', self._bind]
        if self._port is not None:
            options += ['--port', str(self._port)]
        if global_dev():
            options += ['--debug', '--log-level=DEBUG']
        if not geteuid():
            options += ['--allow-root', '--no-browser']
        # https://github.com/jupyter/notebook/issues/2254
        options += ['--NotebookApp.token=""']
        log.debug(f'Jupyter options: {options}')
        JupyterServer.launch_instance(options, started=started)

    t = Thread(target=start)
    t.daemon = True
    t.start()
    # 'started' is set in JupyterServer.start(), which is as late as we can get in startup.
    started.wait()
    log.debug('Separate thread started')
    while not hasattr(JupyterServer._instance, 'connection_url') \
            or not JupyterServer._instance.connection_url:
        log.debug('Waiting for connection URL')
        sleep(1)
    old_url = JupyterServer._instance.connection_url
    new_url = uriunsplit(urisplit(old_url)._replace(
        authority=f'{self._proxy_bind}:{self._proxy_port}'))
    log.debug(f'Rewrote {old_url} -> {new_url}')
    self._data.sys.set_constant(SystemConstant.JUPYTER_URL, new_url, force=True)
    log.info('Jupyter server started')
    while True:
        sleep(1)
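
# Standalone illustration of the connection-URL rewrite above, with made-up
# host and port values in place of self._proxy_bind / self._proxy_port.
from uritools import urisplit, uriunsplit

old_url = 'http://localhost:8888/tree'
new_url = uriunsplit(urisplit(old_url)._replace(authority='0.0.0.0:8000'))
assert new_url == 'http://0.0.0.0:8000/tree'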
def open(uri, filename=None):
    """Given a URI for a data source, open it and return the
    appropriate data source object."""
    desc, rel = parse_uri(uri, filename)
    # Disabled alternative that resolved relative paths explicitly:
    # new_uri, rel = resolve_relative_paths(uri, filename)
    # if rel and filename is None:
    #     wprint('Data store path is relative, but no event store is specified.')
    # else:
    #     uri = new_uri
    #     desc = urisplit(uri)
    iprint('Opening data store at {}'.format(uriunsplit(desc)))
    s = desc.scheme
    if s is None:
        return ndk.ds.neo_in.spike2(desc)
    elif s == 'pre':
        return ndk.ds.pre.pre_ds(desc)
    elif s == 'smr':
        return ndk.ds.neo_in.spike2(desc)
    # elif s == 'file':
    #     return ndk.ds.neo.neo_in(desc)
    elif s == 'cass':
        return ndk.ds.cass.cdb(desc)
    elif s == 'wav':
        return ndk.ds.wav.wav_ds(desc.path)
    elif s == 'ds':
        return ndk.ds.mmap.mmap_ds(desc.path)
    elif s == 'edf':
        return ndk.ds.edf_ds(desc)
    elif s == 'nbm':
        iprint('NBF path: {}'.format(desc.path))
        if rel and desc.path[0] == '/' and desc.path[1] == '.':
            return ndk.ds.nbf(desc.path[1:])  # Hack!
        else:
            return ndk.ds.nbf(desc.path)
    else:
        print("Don't know what to do with this URI scheme: {} (in {})".format(s, uri))
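
# How the scheme dispatch in open() sees a URI; 'wav:/tmp/recording.wav' is a
# made-up example, and urisplit() does the parsing that parse_uri() presumably
# wraps.
from uritools import urisplit

desc = urisplit('wav:/tmp/recording.wav')
assert desc.scheme == 'wav'
assert desc.path == '/tmp/recording.wav'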
def __init__(self, uri):
    self.batch = BatchLoader()
    try:
        self.uri = uri
        options = {'echo': False, 'executemany_mode': 'values'}
        connect_args = {}
        uri_parts = urisplit(uri)
        if uri_parts.query:
            for name, value in uri_parts.getquerydict().items():
                if len(value) > 1:
                    raise Exception(f'Multiple values for option {name}')
                value = value[0]
                log.debug(f'Have additional URI option {name} = {value}')
                if name == 'echo':
                    # Note: any non-empty string (including 'false') enables echo.
                    options[name] = bool(value)
                elif name == 'executemany_mode':
                    options[name] = value
                elif name == 'search_path':
                    connect_args['options'] = f'-csearch_path={value}'
                else:
                    raise Exception(f'Unsupported option {name} = {value}')
            # Strip the query so SQLAlchemy sees a plain database URI:
            uri = uriunsplit(uri_parts._replace(query=None))
        if uri_parts.scheme != 'postgresql':
            log.warning(f'Legacy scheme {uri_parts.scheme}; discarding options')
            options, connect_args = {}, {}
        log.debug(f'Creating engine for {uri} with options {options} '
                  f'and connect args {connect_args}')
        self.engine = create_engine(uri, **options, connect_args=connect_args)
        self.session = sessionmaker(bind=self.engine, class_=DirtySession)
        self.engine.connect().execute(text('select 1')).fetchone()  # test connection
    except:
        log_current_exception(traceback=False)
        raise CannotConnect('Could not connect to database')
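
# Sketch of the URI option handling above, runnable with uritools alone; the
# postgresql URI is a made-up example.
from uritools import urisplit, uriunsplit

parts = urisplit('postgresql://user@db-host/activity?search_path=my_schema')
assert parts.getquerydict() == {'search_path': ['my_schema']}
# Strip the query before handing the URI to the engine, as __init__ does:
assert uriunsplit(parts._replace(query=None)) == 'postgresql://user@db-host/activity'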
def get_postgres_cnxn(config):
    return get_cnxn(config,
                    uri=uriunsplit(urisplit(config.args[URI])._replace(path='/postgres')))
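
# What the _replace(path='/postgres') rewrite produces, with a made-up URI:
from uritools import urisplit, uriunsplit

uri = 'postgresql://user@db-host/activity'
assert (uriunsplit(urisplit(uri)._replace(path='/postgres'))
        == 'postgresql://user@db-host/postgres')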
def check(self, split, uri):
    result = uriunsplit(split)
    self.assertEqual(result, uri)
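
# Example of a (split, uri) pair this test helper might be called with; the
# expected string assumes plain RFC 3986 recomposition of a 5-tuple.
from uritools import uriunsplit

assert (uriunsplit(('http', 'example.com', '/path', 'q=1', 'frag'))
        == 'http://example.com/path?q=1#frag')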
def __str__(self):
    return uritools.uriunsplit(self.uri)
def externalize(self):
    return uritools.uriunsplit(self.url)
def encode_url(url):
    parts = list(uritools.urisplit(url))
    for i in [2, 3, 4]:  # path, query, fragment
        if parts[i]:
            parts[i] = urllib.parse.quote(parts[i])
    return uritools.uriunsplit(parts)
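
# Example use of encode_url(); note that urllib.parse.quote (default safe='/')
# also escapes '=' in the query, which is assumed to be acceptable here.
assert (encode_url('https://example.com/a b?q=hello world#see also')
        == 'https://example.com/a%20b?q%3Dhello%20world#see%20also')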
def retrieve(self, headers, path):
    # TODO: If `curl` is not present, use urllib.
    curl('-sSfL', uritools.uriunsplit(self.base),
         '-D', str(headers), '-o', str(path))