Code example #1
0
File: test_progress.py  Project: greenTara/PyEMMA
 def test_force_finish(self):
     """Overshooting the registered amount of work warns but must not crash."""
     import warnings
     worker = ProgressReporter()
     worker._progress_register(100)
     # intentionally overshoot registered work
     with warnings.catch_warnings(record=True) as cm:
         # ensure the warning is actually recorded regardless of any
         # filter state set up earlier in the process
         warnings.simplefilter("always")
         worker._progress_update(101)
     # cm[0].message is the Warning instance itself; indexing it with [0]
     # raises TypeError on Python 3 — use str() to obtain the message text.
     self.assertIn("more work than registered", str(cm[0].message))
     worker._progress_force_finish()
Code example #2
0
class TestProgress(unittest.TestCase):
    """Verify that the global ``settings`` context manager overrides the
    per-instance ``show_progress`` flag of a :class:`ProgressReporter`."""

    def setUp(self):
        # fresh reporter with a registered amount of work for every test
        self.pg = ProgressReporter()
        self.pg._progress_register(100, "test")

    def test_config_override(self):
        """An instance flag of True is suppressed while the global setting
        show_progress_bars is False."""
        self.pg.show_progress = True
        with settings(show_progress_bars=False):
            # assertFalse instead of `== False` (PEP 8 E712) and instead of a
            # bare assert, which is stripped under -O and reports poorly
            self.assertFalse(self.pg.show_progress)

    def test_config_2(self):
        """An instance flag of False stays False even when the global
        setting show_progress_bars is True."""
        self.pg.show_progress = False
        with settings(show_progress_bars=True):
            self.assertFalse(self.pg.show_progress)
Code example #3
0
File: test_progress.py  Project: greenTara/PyEMMA
    def test_callback(self):
        """A registered progress callback fires once per update call and
        receives the stage and the ProgressBar instance."""
        self.has_been_called = 0

        def on_progress(stage, progressbar, *args, **kw):
            self.has_been_called += 1
            assert isinstance(stage, int)
            assert isinstance(progressbar, ProgressBar)

        total = 100
        reporter = ProgressReporter()
        reporter._progress_register(total,
                                    description="hard working",
                                    stage=0)
        reporter.register_progress_callback(on_progress, stage=0)
        for _ in range(total):
            reporter._progress_update(1, stage=0)
        # one callback invocation per unit of work
        self.assertEqual(self.has_been_called, total)
Code example #4
0
def frames_from_files(files,
                      top,
                      frames,
                      chunksize=1000,
                      stride=1,
                      verbose=False,
                      copy_not_join=None,
                      reader=None):
    """
    Constructs a Trajectory object out of given frames collected from files (or given reader).

    :param files: source trajectory file name or list of file names;
        ignored when *reader* is given
    :param top: topology file/object (coerced via _enforce_top); ignored when
        *reader* is given — then the topology is taken from the reader's featurizer
    :param frames: frame indices: either an (n, 2) array of
        (file_index, frame_index) pairs, or a 1-D array of frame indices when
        *files* is a single file name
    :param chunksize: number of frames to read per chunk while iterating
    :param stride: the frame-index column is scaled by this factor, so the
        given indices are interpreted relative to the strided trajectory
    :param verbose: if True, log that the stride was applied to the indices
    :param copy_not_join: not used
    :param reader: if a reader is given, ignore files and top param!
    :return: mdtraj.Trajectory consisting out of frames indices.
    """
    # Enforce topology to be a md.Topology object
    if reader is None:
        top = _enforce_top(top)
    else:
        if not reader.number_of_trajectories():
            raise ValueError("need at least one trajectory file in reader.")
        # take the topology from the reader itself instead of the top param
        if isinstance(reader, FragmentedTrajectoryReader):
            top = reader._readers[0][0].featurizer.topology
        elif isinstance(reader, FeatureReader):
            top = reader.featurizer.topology
        else:
            raise ValueError("unsupported reader (only md readers).")

    stride = int(stride)
    frames = np.array(frames)

    # only one file, so we expect frames to be a one dimensional array
    if isinstance(files, str):
        files = [files]
        if frames.ndim == 1:
            # insert a constant column for file index
            frames = np.insert(np.atleast_2d(frames),
                               0,
                               np.zeros_like(frames),
                               axis=0).T

    if stride != 1:
        # scale the frame-index column so it addresses the unstrided data
        frames[:, 1] *= int(stride)
        if verbose:
            log.info('A stride value of = %u was parsed, '
                     'interpreting "indexes" accordingly.' % stride)

    # sort by file and frame index
    sort_inds = np.lexsort((frames[:, 1], frames[:, 0]))
    sorted_inds = frames[sort_inds]
    assert len(sorted_inds) == len(frames)

    file_inds_unique = np.unique(sorted_inds[:, 0])
    # construct reader
    if reader is None:
        # filter out files, we would never read, because no indices are pointing to them
        reader = source(np.array(files)[file_inds_unique].tolist(), top=top)
        # re-map indices to reflect filtered files:
        for itraj, c in zip(file_inds_unique, itertools.count(0)):
            mask = sorted_inds[:, 0] == itraj
            sorted_inds[mask, 0] = c

        inds_to_check = np.arange(len(file_inds_unique))
    else:
        inds_to_check = file_inds_unique

    # sanity check of indices
    for itraj in inds_to_check:
        inds_by_traj = sorted_inds[sorted_inds[:, 0] == itraj]
        # NOTE(review): the max is taken over BOTH columns (file index and
        # frame index); presumably only the frame-index column was meant —
        # confirm, a large file index could make this check fire spuriously.
        largest_ind_in_traj = np.max(inds_by_traj)
        length = reader.trajectory_length(itraj)
        if length < largest_ind_in_traj:
            # NOTE(review): the first two format arguments are identical
            # (largest_ind_in_traj / stride twice); the message layout looks
            # off — verify against the intended wording.
            raise ValueError(
                "largest specified index (%i * stride=%i * %i=%i) "
                "is larger than trajectory length '%s' = %i" %
                (largest_ind_in_traj / stride, largest_ind_in_traj / stride,
                 stride, largest_ind_in_traj, reader.filenames[itraj], length))

    # we want the FeatureReader to return mdtraj.Trajectory objects
    if isinstance(reader, FeatureReader):
        reader._return_traj_obj = True
    elif isinstance(reader, FragmentedTrajectoryReader):
        # flip the flag on every underlying FeatureReader of the fragments
        for file in reader.filenames_flat:
            r = reader.reader_by_filename(file)
            if isinstance(r, FeatureReader):
                r = [r]
            for _r in r:
                _r._return_traj_obj = True

    # iterate with the sorted (file, frame) pairs as the stride argument so
    # the reader yields exactly the requested frames, reporting progress
    it = reader.iterator(chunk=chunksize,
                         stride=sorted_inds,
                         return_trajindex=False)
    reporter = ProgressReporter()
    reporter._progress_register(it._n_chunks, description="collecting frames")
    collected_frames = []
    with it:
        for x in it:
            collected_frames.append(x)
            reporter._progress_update(1)
    reporter._progress_force_finish()

    # copy the collected chunks into one pre-allocated trajectory ...
    dest = _preallocate_empty_trajectory(top, len(frames))
    i = 0
    for chunk in collected_frames:
        _copy_traj_attributes(dest, chunk, i)
        i += len(chunk)
    # ... and restore the caller's original (pre-sort) frame order
    dest = dest.slice(sort_inds.argsort(), copy=False)
    return dest