Ejemplo n.º 1
0
 def _dump_params(cls, recording, output_folder, sorter_params, verbose):
     """Persist the sorter name and its parameters as JSON in output_folder."""
     params_file = output_folder / 'spikeinterface_params.json'
     all_params = {
         'sorter_name': cls.sorter_name,
         'sorter_params': sorter_params,
     }
     with params_file.open(mode='w', encoding='utf8') as f:
         json.dump(check_json(all_params), f, indent=4)
Ejemplo n.º 2
0
    def set_params(self,
                   n_components=5,
                   mode='by_channel_local',
                   whiten=True,
                   dtype='float32'):
        """
        Set parameters for principal component extraction.

        Parameters
        ----------
        n_components: int
            Number of components for PCA model
        mode: str
            'by_channel_local' / 'by_channel_global' / 'concatenated'
        whiten: bool
            Params transmitted to sklearn.PCA
        dtype: np.dtype
            Dtype for pca projections
        """
        # invalidate any previously computed projections
        self._reset()

        assert mode in _possible_modes, "Invalid mode!"

        self._params = dict(n_components=int(n_components),
                            mode=str(mode),
                            whiten=bool(whiten),
                            dtype=np.dtype(dtype).str)

        # persist params so the extension can be reloaded from disk
        (self.folder / 'params_pca.json').write_text(json.dumps(check_json(
            self._params),
                                                                indent=4),
                                                     encoding='utf8')
    def set_params(self,
                   n_components=5,
                   mode='by_channel_local',
                   whiten=True,
                   dtype='float32'):
        """
        Set parameters for principal component extraction.

        Parameters
        ----------
        n_components: int
            Number of components for PCA model
        mode: str
            'by_channel_local' / 'by_channel_global' / 'concatenated'
        whiten: bool
            Params transmitted to sklearn.PCA
        dtype: np.dtype
            Dtype for pca projections
        """
        # drop any previously computed state before changing params
        self._reset()

        assert mode in _possible_modes, "Invalid mode!"

        params = {
            'n_components': int(n_components),
            'mode': str(mode),
            'whiten': bool(whiten),
            'dtype': np.dtype(dtype).str,
        }
        self._params = params

        # persist params so the extension can be reloaded from disk
        params_text = json.dumps(check_json(params), indent=4)
        (self.folder / 'params_pca.json').write_text(params_text,
                                                     encoding='utf8')
Ejemplo n.º 4
0
def run_sorter_container(sorter_name,
                         recording,
                         mode,
                         container_image,
                         output_folder=None,
                         remove_existing_folder=True,
                         delete_output_folder=False,
                         verbose=False,
                         raise_error=True,
                         with_output=True,
                         **sorter_params):
    """
    Run a spike sorter inside a container (common code for docker and
    singularity).

    Three files (recording dict, sorter params, a runner script) are written
    next to the output folder so the containerized process can rebuild the
    recording and call run_sorter_local. If spikeinterface is missing in the
    image it is pip-installed first.

    Parameters
    ----------
    sorter_name: str
        Key into sorter_dict selecting the sorter class.
    recording: RecordingExtractor
        Recording to sort; must be serializable via to_dict().
    mode: str
        Container backend, forwarded to ContainerClient.
    container_image: str
        Image name/tag to run.
    output_folder: str or Path or None
        Where the sorter writes results. Defaults to '<sorter_name>_output'.
    remove_existing_folder: bool
        Forwarded to run_sorter_local inside the container.
    delete_output_folder: bool
        Remove output_folder after retrieving the sorting.
    verbose: bool
        Print progress messages.
    raise_error: bool
        Raise SpikeSortingError when the in-container run failed.
    with_output: bool
        Load and return the sorting result (otherwise return None).
    **sorter_params
        Sorter-specific parameters, dumped to JSON for the container.

    Returns
    -------
    sorting or None
        The sorting result, or None when with_output is False or the run
        failed with raise_error=False.
    """
    assert platform.system() in ('Linux', 'Darwin'), \
        'run_sorter() with docker is supported only on linux/macos platform '

    if output_folder is None:
        output_folder = sorter_name + '_output'

    SorterClass = sorter_dict[sorter_name]
    output_folder = Path(output_folder).absolute()
    parent_folder = output_folder.parent

    # find input folder of recording for folder bind
    rec_dict = recording.to_dict()
    recording_input_folder = find_recording_folder(rec_dict)

    # create 3 files for communication with the container:
    # 1) the recording dict
    (parent_folder / 'in_container_recording.json').write_text(json.dumps(
        check_json(rec_dict), indent=4),
                                                               encoding='utf8')
    # 2) the sorter-specific parameters
    (parent_folder / 'in_container_params.json').write_text(json.dumps(
        check_json(sorter_params), indent=4),
                                                            encoding='utf8')
    # 3) the python script executed inside the container

    py_script = f"""
import json
from spikeinterface import load_extractor
from spikeinterface.sorters import run_sorter_local

# load recording in docker
recording = load_extractor('{parent_folder}/in_container_recording.json')

# load params in docker
with open('{parent_folder}/in_container_params.json', encoding='utf8', mode='r') as f:
    sorter_params = json.load(f)

# run in docker
output_folder = '{output_folder}'
run_sorter_local('{sorter_name}', recording, output_folder=output_folder,
            remove_existing_folder={remove_existing_folder}, delete_output_folder=False,
            verbose={verbose}, raise_error={raise_error}, **sorter_params)
"""
    (parent_folder / 'in_container_sorter_script.py').write_text(
        py_script, encoding='utf8')

    # bind the recording folder read-only and the working folder read-write
    volumes = {}
    volumes[str(recording_input_folder)] = {
        'bind': str(recording_input_folder),
        'mode': 'ro'
    }
    volumes[str(parent_folder)] = {'bind': str(parent_folder), 'mode': 'rw'}

    extra_kwargs = {}
    if SorterClass.docker_requires_gpu:
        extra_kwargs['requires_gpu'] = True

    container_client = ContainerClient(mode, container_image, volumes,
                                       extra_kwargs)
    if verbose:
        print('Starting container')
    container_client.start()

    # check if the container already has spikeinterface (both the package
    # and the sorters entry point) installed
    cmd_1 = [
        'python', '-c',
        'import spikeinterface; print(spikeinterface.__version__)'
    ]
    cmd_2 = [
        'python', '-c', 'from spikeinterface.sorters import run_sorter_local'
    ]
    res_output = ''
    for cmd in [cmd_1, cmd_2]:
        res_output += str(container_client.run_command(cmd))
    need_si_install = 'ModuleNotFoundError' in res_output

    if need_si_install:
        if 'dev' in si_version:
            # host runs a dev version: install from sources so versions match
            if verbose:
                print(
                    f"Installing spikeinterface from sources in {container_image}"
                )
            # TODO later check output
            cmd = 'pip install --upgrade --no-input MEArec'
            res_output = container_client.run_command(cmd)
            cmd = 'pip install --upgrade --no-input git+https://github.com/SpikeInterface/spikeinterface.git#egg=spikeinterface[full]'
            res_output = container_client.run_command(cmd)
            cmd = 'pip install --upgrade --no-input https://github.com/NeuralEnsemble/python-neo/archive/master.zip'
            res_output = container_client.run_command(cmd)
        else:
            # install the same released version as the host
            if verbose:
                print(
                    f"Installing spikeinterface=={si_version} in {container_image}"
                )
            cmd = f'pip install --upgrade --no-input spikeinterface[full]=={si_version}'
            res_output = container_client.run_command(cmd)
    else:
        # TODO version checking
        if verbose:
            print(f'spikeinterface is already installed in {container_image}')

    # run sorter on folder
    if verbose:
        print(f'Running {sorter_name} sorter inside {container_image}')

    # a list command (not a shell string) is required for singularity
    cmd = [
        'python', '{}'.format(parent_folder / 'in_container_sorter_script.py')
    ]
    res_output = container_client.run_command(cmd)
    run_sorter_output = res_output

    # chown the output to the host user (the container runs as root)
    uid = os.getuid()
    # a list command (not a shell string) is required for singularity
    # BUGFIX: '{output_folder}' was a plain string literal (missing f-prefix),
    # so chown targeted the non-existent literal path '{output_folder}'
    # instead of the real output folder.
    cmd = ['chown', f'{uid}:{uid}', '-R', f'{output_folder}']
    res_output = container_client.run_command(cmd)

    if verbose:
        print('Stopping container')
    container_client.stop()

    # clean the temporary communication files
    os.remove(parent_folder / 'in_container_recording.json')
    os.remove(parent_folder / 'in_container_params.json')
    os.remove(parent_folder / 'in_container_sorter_script.py')

    # check error via the log file dumped by run_sorter_local
    output_folder = Path(output_folder)
    log_file = output_folder / 'spikeinterface_log.json'
    if not log_file.is_file():
        # no log at all means the run crashed before writing it
        run_error = True
    else:
        with log_file.open('r', encoding='utf8') as f:
            log = json.load(f)
        run_error = bool(log['error'])

    sorting = None
    if run_error:
        if raise_error:
            raise SpikeSortingError(
                f"Spike sorting in {mode} failed with the following error:\n{run_sorter_output}"
            )
    else:
        if with_output:
            sorting = SorterClass.get_result_from_folder(output_folder)

    if delete_output_folder:
        shutil.rmtree(output_folder)

    return sorting
Ejemplo n.º 5
0
    def run_from_folder(cls, output_folder, raise_error, verbose):
        """
        Execute the sorter on an already set-up output folder.

        Reads back the sorter name and parameters dumped during setup, runs
        the sorter, then writes 'spikeinterface_log.json' containing timing,
        the error flag/trace and the sorter's own runtime trace.

        Parameters
        ----------
        output_folder: str or Path
            Folder previously prepared; must contain
            'spikeinterface_params.json'.
        raise_error: bool
            Raise SpikeSortingError when the run failed.
        verbose: bool
            Print run time or error message.

        Returns
        -------
        run_time: float or None
            Elapsed seconds, or None when the run raised an exception.
        """
        # need setup_recording to be done.
        output_folder = Path(output_folder)

        # retrieve sorter_name and params dumped during setup
        with (output_folder /
              'spikeinterface_params.json').open(mode='r') as f:
            params = json.load(f)
        sorter_params = params['sorter_params']
        sorter_name = params['sorter_name']

        # local import to avoid a circular import at module load time
        from .sorterlist import sorter_dict
        SorterClass = sorter_dict[sorter_name]

        now = datetime.datetime.now()
        log = {
            'sorter_name': str(SorterClass.sorter_name),
            'sorter_version': str(SorterClass.get_sorter_version()),
            'datetime': now,
            'runtime_trace': []
        }
        t0 = time.perf_counter()

        try:
            SorterClass._run_from_folder(output_folder, sorter_params, verbose)
            t1 = time.perf_counter()
            run_time = float(t1 - t0)
            has_error = False
        except Exception:
            has_error = True
            run_time = None
            log['error_trace'] = traceback.format_exc()

        log['error'] = has_error
        log['run_time'] = run_time

        # some sorters have a log file due to a shellscript launcher
        runtime_trace_path = output_folder / f'{sorter_name}.log'
        runtime_trace = []
        if runtime_trace_path.is_file():
            with open(runtime_trace_path, 'r') as fp:
                runtime_trace = [line.strip() for line in fp]
        log['runtime_trace'] = runtime_trace

        # dump the log to json
        with (output_folder / 'spikeinterface_log.json').open(
                'w', encoding='utf8') as f:
            json.dump(check_json(log), f, indent=4)

        if verbose:
            if has_error:
                print(f'Error running {sorter_name}')
            else:
                print(f'{sorter_name} run time {run_time:0.2f}s')

        if has_error and raise_error:
            print(log['error_trace'])
            raise SpikeSortingError(
                f"Spike sorting failed. You can inspect the runtime trace in {output_folder}/spikeinterface_log.json"
            )

        return run_time
Ejemplo n.º 6
0
def run_sorter_docker(sorter_name,
                      recording,
                      docker_image,
                      output_folder=None,
                      remove_existing_folder=True,
                      delete_output_folder=False,
                      verbose=False,
                      raise_error=True,
                      **sorter_params):
    """
    Run a spike sorter inside a docker container.

    The recording dict, the sorter params and a runner script are written
    next to the output folder, bound into the container, and executed as a
    single `sh -c` command chain (optionally preceded by a pip install of
    spikeinterface when the image lacks it).

    Parameters
    ----------
    sorter_name: str
        Key into sorter_dict selecting the sorter class.
    recording: RecordingExtractor
        Recording to sort; must be serializable via to_dict().
    docker_image: str
        Docker image name/tag.
    output_folder: str or Path or None
        Where the sorter writes results. Defaults to '<sorter_name>_output'.
    remove_existing_folder: bool
        Forwarded to run_sorter_local inside the container.
    delete_output_folder: bool
        Remove output_folder after retrieving the sorting.
    verbose: bool
        Forwarded to run_sorter_local inside the container.
    raise_error: bool
        Forwarded to run_sorter_local inside the container.
    **sorter_params
        Sorter-specific parameters, dumped to JSON for the container.

    Returns
    -------
    sorting
        Result loaded with SorterClass.get_result_from_folder.
    """
    import docker

    if output_folder is None:
        output_folder = sorter_name + '_output'

    output_folder = Path(output_folder).absolute()
    parent_folder = output_folder.parent
    folder_name = output_folder.stem

    # find input folder of recording for folder bind; paths inside the
    # dict are rewritten to the in-container mount point
    rec_dict = recording.to_dict()
    rec_dict, recording_input_folder = modify_input_folder(
        rec_dict, '/recording_input_folder')

    # create 3 files for communication with docker:
    # 1) the recording dict
    (parent_folder / 'in_docker_recording.json').write_text(json.dumps(
        check_json(rec_dict), indent=4),
                                                            encoding='utf8')
    # 2) the sorter-specific parameters
    (parent_folder / 'in_docker_params.json').write_text(json.dumps(
        check_json(sorter_params), indent=4),
                                                         encoding='utf8')
    # 3) the python script executed inside the container
    py_script = f"""
import json
from spikeinterface import load_extractor
from spikeinterface.sorters import run_sorter_local

# load recorsding in docker
recording = load_extractor('/sorting_output_folder/in_docker_recording.json')

# load params in docker
with open('/sorting_output_folder/in_docker_params.json', encoding='utf8', mode='r') as f:
    sorter_params = json.load(f)

# run in docker
output_folder = '/sorting_output_folder/{folder_name}'
run_sorter_local('{sorter_name}', recording, output_folder=output_folder,
            remove_existing_folder={remove_existing_folder}, delete_output_folder=False,
            verbose={verbose}, raise_error={raise_error}, **sorter_params)
"""
    (parent_folder / 'in_docker_sorter_script.py').write_text(py_script,
                                                              encoding='utf8')

    # docker bind (mount): working folder read-write, recording read-only
    volumes = {
        str(parent_folder): {
            'bind': '/sorting_output_folder',
            'mode': 'rw'
        },
        str(recording_input_folder): {
            'bind': '/recording_input_folder',
            'mode': 'ro'
        },
    }

    client = docker.from_env()

    # check if the image already contains spikeinterface: a failed import
    # makes the probe container exit non-zero
    cmd = 'python -c "import spikeinterface; print(spikeinterface.__version__)"'
    try:
        res = client.containers.run(docker_image, cmd)
        need_si_install = False
    except docker.errors.ContainerError:
        need_si_install = True

    # build the shell command list executed in one container run
    commands = []

    if need_si_install:
        if 'dev' in si_version:
            # host runs a dev version: install from github sources
            cmd = 'pip install --upgrade --force MEArec'
            commands.append(cmd)

            cmd = 'pip install --upgrade --force https://github.com/samuelgarcia/spikeinterface/archive/big_refactoring.zip'
            commands.append(cmd)

            cmd = 'pip install --upgrade --force https://github.com/NeuralEnsemble/python-neo/archive/master.zip'
            commands.append(cmd)
        else:
            # install from pypi with same version as host.
            # BUGFIX: this install was previously also executed immediately in
            # a throwaway client.containers.run(...) container; that
            # container's filesystem is discarded, so the extra run had no
            # effect and only wasted time — the command is now only appended
            # to the chained command list below.
            cmd = f'pip install --upgrade --force spikeinterface[full]=={si_version}'
            commands.append(cmd)

    # run sorter on folder
    cmd = 'python /sorting_output_folder/in_docker_sorter_script.py'
    commands.append(cmd)

    # put file permission to user (because docker is root...)
    uid = os.getuid()
    cmd = f'chown {uid}:{uid} -R /sorting_output_folder/{folder_name}'
    commands.append(cmd)

    # chain with '&&' so a failing step aborts the remaining ones
    commands = ' && '.join(commands)
    command = f'sh -c "{commands}"'

    res = client.containers.run(docker_image, command=command, volumes=volumes)

    # clean the temporary communication files
    os.remove(parent_folder / 'in_docker_recording.json')
    os.remove(parent_folder / 'in_docker_params.json')
    os.remove(parent_folder / 'in_docker_sorter_script.py')

    SorterClass = sorter_dict[sorter_name]
    sorting = SorterClass.get_result_from_folder(output_folder)

    if delete_output_folder:
        shutil.rmtree(output_folder)

    return sorting
Ejemplo n.º 7
0
def run_sorter_docker(sorter_name,
                      recording,
                      docker_image,
                      output_folder=None,
                      remove_existing_folder=True,
                      delete_output_folder=False,
                      verbose=False,
                      raise_error=True,
                      with_output=True,
                      **sorter_params):
    """
    Run a spike sorter inside a long-lived docker container (exec_run based).

    The recording dict, the sorter params and a runner script are written
    next to the output folder, the host paths are bound into the container
    at the same locations, and the script is executed with exec_run. If the
    image lacks spikeinterface it is pip-installed first.

    Parameters
    ----------
    sorter_name: str
        Key into sorter_dict selecting the sorter class.
    recording: RecordingExtractor
        Recording to sort; must be serializable via to_dict().
    docker_image: str
        Docker image name/tag.
    output_folder: str or Path or None
        Where the sorter writes results. Defaults to '<sorter_name>_output'.
    remove_existing_folder: bool
        Forwarded to run_sorter_local inside the container.
    delete_output_folder: bool
        Remove output_folder after retrieving the sorting.
    verbose: bool
        Print progress messages.
    raise_error: bool
        Raise SpikeSortingError when the in-container run failed.
    with_output: bool
        Load and return the sorting result (otherwise return None).
    **sorter_params
        Sorter-specific parameters, dumped to JSON for the container.

    Returns
    -------
    sorting or None
        The sorting result, or None when with_output is False or the run
        failed with raise_error=False.
    """
    import docker

    assert platform.system() in (
        'Linux', 'Darwin'
    ), 'run_sorter() with docker is supported only on linux/macos platform '

    if output_folder is None:
        output_folder = sorter_name + '_output'

    SorterClass = sorter_dict[sorter_name]
    output_folder = Path(output_folder).absolute()
    parent_folder = output_folder.parent
    folder_name = output_folder.stem

    # find input folder of recording for folder bind
    rec_dict = recording.to_dict()
    recording_input_folder = find_recording_folder(rec_dict)

    # create 3 files for communication with docker:
    # 1) the recording dict
    (parent_folder / 'in_docker_recording.json').write_text(json.dumps(
        check_json(rec_dict), indent=4),
                                                            encoding='utf8')
    # 2) the sorter-specific parameters
    (parent_folder / 'in_docker_params.json').write_text(json.dumps(
        check_json(sorter_params), indent=4),
                                                         encoding='utf8')
    # 3) the python script executed inside the container
    # (host paths are used directly because volumes bind them 1:1 below)

    py_script = f"""
import json
from spikeinterface import load_extractor
from spikeinterface.sorters import run_sorter_local

# load recorsding in docker
recording = load_extractor('{parent_folder}/in_docker_recording.json')

# load params in docker
with open('{parent_folder}/in_docker_params.json', encoding='utf8', mode='r') as f:
    sorter_params = json.load(f)

# run in docker
output_folder = '{output_folder}'
run_sorter_local('{sorter_name}', recording, output_folder=output_folder,
            remove_existing_folder={remove_existing_folder}, delete_output_folder=False,
            verbose={verbose}, raise_error={raise_error}, **sorter_params)
"""
    (parent_folder / 'in_docker_sorter_script.py').write_text(py_script,
                                                              encoding='utf8')

    # bind host paths at identical in-container paths: recording read-only,
    # working folder read-write
    volumes = {}
    volumes[str(recording_input_folder)] = {
        'bind': str(recording_input_folder),
        'mode': 'ro'
    }
    volumes[str(parent_folder)] = {'bind': str(parent_folder), 'mode': 'rw'}

    extra_kwargs = {}
    if SorterClass.docker_requires_gpu:
        # expose all host GPUs to the container
        extra_kwargs["device_requests"] = [
            docker.types.DeviceRequest(count=-1, capabilities=[['gpu']])
        ]

    client = docker.from_env()

    # tty=True keeps the container alive so several exec_run calls can reuse it
    container = client.containers.create(docker_image,
                                         tty=True,
                                         volumes=volumes,
                                         **extra_kwargs)
    if verbose:
        print('Starting container')
    container.start()

    # check if the image already contains spikeinterface
    cmd = 'python -c "import spikeinterface; print(spikeinterface.__version__)"'
    res = container.exec_run(cmd)
    # exec_run output is bytes, hence the bytes literal
    need_si_install = b'ModuleNotFoundError' in res.output

    if need_si_install:
        if 'dev' in si_version:
            # host runs a dev version: install from sources so versions match
            if verbose:
                print(
                    f"Installing spikeinterface from sources in {docker_image}"
                )
            # TODO later check output
            cmd = 'pip install --upgrade --force MEArec'
            res = container.exec_run(cmd)
            cmd = 'pip install -e git+https://github.com/SpikeInterface/spikeinterface.git#egg=spikeinterface[full]'
            res = container.exec_run(cmd)
            cmd = 'pip install --upgrade --force https://github.com/NeuralEnsemble/python-neo/archive/master.zip'
            res = container.exec_run(cmd)
        else:
            # install the same released version as the host
            if verbose:
                print(
                    f"Installing spikeinterface=={si_version} in {docker_image}"
                )
            cmd = f'pip install --upgrade --force spikeinterface[full]=={si_version}'
            res = container.exec_run(cmd)
    else:
        # TODO version checking
        if verbose:
            print(f'spikeinterface is already installed in {docker_image}')

    # run sorter on folder
    if verbose:
        print(f'Running {sorter_name} sorter inside {docker_image}')
    cmd = 'python "{}"'.format(parent_folder / 'in_docker_sorter_script.py')
    res = container.exec_run(cmd)
    # kept for the error message when the run failed
    run_sorter_output = res.output

    # chown folder to user uid (the container runs as root)
    uid = os.getuid()
    cmd = f'chown {uid}:{uid} -R "{output_folder}"'
    res = container.exec_run(cmd)

    if verbose:
        print('Stopping container')
    container.stop()

    # clean the temporary communication files
    os.remove(parent_folder / 'in_docker_recording.json')
    os.remove(parent_folder / 'in_docker_params.json')
    os.remove(parent_folder / 'in_docker_sorter_script.py')

    # check error via the log file dumped by run_sorter_local;
    # a missing log means the run crashed before writing it
    output_folder = Path(output_folder)
    log_file = output_folder / 'spikeinterface_log.json'
    if not log_file.is_file():
        run_error = True
    else:
        with log_file.open('r', encoding='utf8') as f:
            log = json.load(f)
        run_error = bool(log['error'])

    sorting = None
    if run_error:
        if raise_error:
            raise SpikeSortingError(
                f"Spike sorting in docker failed with the following error:\n{run_sorter_output}"
            )
    else:
        if with_output:
            sorting = SorterClass.get_result_from_folder(output_folder)

    if delete_output_folder:
        shutil.rmtree(output_folder)

    return sorting