def track(self):
    self._get_video_info()
    cap = video.ReadVideo(self.input_filename)
    # self.data = dataframes.DataStore(self.data_filename, load=False)
    frames = cap.frames()
    self.data = pd.DataFrame()
    if self.multiprocess:
        # Analyse frames on a pool of worker threads; append_data collects
        # each result as it completes.
        p = ThreadPool(4)
        res = []
        for frame in tqdm(frames, 'track', total=cap.num_frames):
            r = p.apply_async(self.analyse_frame, (frame,),
                              callback=self.append_data)
            res.append(r)
            # Wait for pending jobs in batches of 50 so the queue of
            # decoded frames does not grow without bound.
            if len(res) > 50:
                for r in res:
                    r.wait()
                res = []
        for r in tqdm(res, 'wait'):
            r.wait()
        p.close()
        p.join()
    else:
        # map() is lazy in Python 3, so iterate explicitly and collect
        # the results ourselves.
        for frame in tqdm(frames, total=cap.num_frames):
            self.append_data(self.analyse_frame(frame))
    self.data = self.data.set_index('frame')
    self.data = self.data.sort_index()
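The apply_async / callback pattern above keeps at most about 50 analyses in flight, so decoded frames do not pile up in memory. A minimal standalone sketch of the same idea, with a placeholder work function (analyse, collect and the fake frame range are illustrative, not part of the library):

from multiprocessing.pool import ThreadPool

results = []

def analyse(frame):
    return frame * 2          # stand-in for per-frame analysis

def collect(result):
    results.append(result)    # plays the role of append_data

pool = ThreadPool(4)
pending = []
for frame in range(200):      # stand-in for a frame iterator
    pending.append(pool.apply_async(analyse, (frame,), callback=collect))
    if len(pending) > 50:     # throttle: wait for the current batch to finish
        for job in pending:
            job.wait()
        pending = []
for job in pending:           # drain the remaining jobs
    job.wait()
pool.close()
pool.join()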
    def __init__(self, filename, tracking=False, multiprocess=False):
        """
        Parameters
        ----------
        filename: str
            filepath for video.ReadVideo class

        tracking: bool
            If true, do steps specific to tracking.

        multiprocess: bool
            If true performs tracking on multiple cores
        """
        self.tracking = tracking
        self.parameters = configurations.EXAMPLE_CHILD_PARAMETERS
        # If you want to use the variance method to subtract a bkg img use the following line
        # The bkg image should be stored with the movie with same name + suffix = _bkgimg.png
        # self.parameters['bkg_img'] = cv2.imread(filename[:-5] + '_bkgimg.png')
        self.ip = preprocessing.Preprocessor(self.parameters)
        self.input_filename = filename
        if self.tracking:
            ParticleTracker.__init__(self, multiprocess=multiprocess)
        else:
            self.cap = video.ReadVideo(self.input_filename)
            self.frame = self.cap.read_next_frame()
    def _track_process(self, group_number):
        """
        Method called by track.

        If not using multiprocess, call with group_number 0.

        Parameters
        ----------
        group_number: int
            Sets the group number for multiprocessing to split the input.
        """
        # Create the DataStore instance
        data_name = (str(group_number) +
                     '.hdf5' if self.multiprocess else self.data_filename)
        with dataframes.DataStore(data_name, load=False) as data:
            data.add_metadata('number_of_frames', self.num_frames)
            data.add_metadata('video_filename', self.input_filename)
            start = self.frame_div * group_number
            self.cap = video.ReadVideo(self.input_filename)
            self.cap.set_frame(start)
            if group_number == 3:
                # The last of the four groups also takes the leftover frames
                # when num_frames is not divisible by four.
                missing = self.num_frames - 4 * (self.num_frames // 4)
                frame_div = self.frame_div + missing
            else:
                frame_div = self.frame_div
            # Iterate over frames
            for f in tqdm(range(frame_div), 'Tracking'):
                info, boundary, info_headings = self.analyse_frame()
                data.add_tracking_data(start + f,
                                       info,
                                       col_names=info_headings)
                if f == 0:
                    data.add_metadata('boundary', boundary)
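The splitting above assumes four worker processes: each group starts at frame_div * group_number and processes frame_div frames, with group 3 also taking the remainder. A quick sketch of that arithmetic (the frame count is made up for illustration):

num_frames = 1022
frame_div = num_frames // 4                      # 255 frames per group
starts = [frame_div * g for g in range(4)]       # [0, 255, 510, 765]
counts = [frame_div] * 4
counts[3] += num_frames - 4 * frame_div          # group 3 absorbs the 2 leftover frames
print(starts, counts)                            # [0, 255, 510, 765] [255, 255, 255, 257]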
    def __init__(self, filename, tracking=False, multiprocess=False):
        self.tracking = tracking
        self.parameters = configurations.TRACKPY_NITRILE_PARAMETERS
        self.ip = preprocessing.Preprocessor(self.parameters)
        self.input_filename = filename
        if self.tracking:
            ParticleTracker.__init__(self, multiprocess=multiprocess)
        else:
            self.cap = video.ReadVideo(self.input_filename)
            self.frame = self.cap.read_next_frame()
Example #5
    def __init__(self, filename, tracking=False, multiprocess=False):
        self.tracking = tracking
        self.parameters = configurations.NITRILE_BEADS_PARAMETERS
        self.ip = preprocessing.Preprocessor(self.parameters)
        self.input_filename = filename
        if self.tracking:
            ParticleTracker.__init__(self,
                                     multiprocess=multiprocess,
                                     link_traj=False)
        else:
            self.cap = video.ReadVideo(self.input_filename)
            self.frame = self.cap.read_next_frame()

        self.headings = ('x', 'y', 'r')
Example #6
def tracking(filename):
    """Overlay tracked circles on four evenly spaced frames and save a 2x2 check image."""
    core_name = os.path.splitext(filename)[0]
    vid_name = core_name + '.MP4'
    data_name = core_name + '.hdf5'
    out_name = core_name + '_check.png'
    data = dataframes.DataStore(data_name)
    crop = data.metadata['crop']
    vid = video.ReadVideo(vid_name)
    print(vid_name)
    frames = np.arange(4)*vid.num_frames//4
    ims = [images.crop_img(vid.find_frame(f), crop) for f in frames]
    circles = [data.get_info(f, ['x', 'y', 'r']) for f in frames]
    new_ims = [images.draw_circles(im, c) for im, c in zip(ims, circles)]
    out = images.vstack(images.hstack(new_ims[0], new_ims[1]),
                        images.hstack(new_ims[2], new_ims[3]))
    images.save(out, out_name)
Example #7
def order(filename):
    """Draw circles coloured by order-parameter magnitude on frame 100 and save a check image."""
    core_name = os.path.splitext(filename)[0]
    vid_name = filename
    data_name = core_name + '.hdf5'
    out_name = core_name + '_check_order.png'
    data = dataframes.DataStore(data_name)
    crop = data.metadata['crop']
    vid = video.ReadVideo(vid_name)
    frame = 100
    im = images.crop_img(vid.find_frame(frame), crop)
    circles = data.df.loc[frame, ['x', 'y', 'r', 'order_r', 'order_i']]
    circles['order_mag'] = np.abs(circles.order_r + 1j * circles.order_i)
    circles = circles[['x', 'y', 'r', 'order_mag']].values
    out = images.draw_circles(im, circles)
    out = images.add_colorbar(out)
    images.display(out)
    images.save(out, out_name)
    def __init__(self, filename, tracking=False, multiprocess=False):
        """
        Parameters
        ----------
        filename: str
            filepath for video.ReadVideo class

        tracking: bool
            If true, do steps specific to tracking.

        multiprocess: bool
            If true performs tracking on multiple cores
        """
        self.tracking = tracking
        self.parameters = configurations.TRACKPY_PARAMETERS
        self.ip = preprocessing.Preprocessor(self.parameters)
        self.input_filename = filename
        if self.tracking:
            ParticleTracker.__init__(self, multiprocess=multiprocess)
        else:
            self.cap = video.ReadVideo(self.input_filename)
            self.frame = self.cap.read_next_frame()
    def _get_video_info(self):
        """
        Reads properties from the video for other methods:

        self.num_frames: int
            total number of frames in the video

        self.fps: int
            frames per second from the video

        self.width, self.height: ints
            width and height of a processed frame
        """
        cap = video.ReadVideo(self.input_filename)
        self.num_frames = cap.num_frames
        # self.frame_div = self.num_frames // self.num_processes
        self.fps = cap.fps
        frame = cap.read_next_frame()
        new_frame, _, _ = self.ip.process(frame)
        self.width, self.height = images.get_width_and_height(new_frame)
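For reference, the same properties can be read with plain OpenCV; this is only a sketch that assumes video.ReadVideo wraps cv2.VideoCapture (the filename is hypothetical):

import cv2

cap = cv2.VideoCapture('movie.MP4')                    # hypothetical path
num_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
ok, frame = cap.read()
height, width = frame.shape[:2]                        # size before any preprocessing
cap.release()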
Example #10
from ParticleTracking.tracking import james_nitrile as jn
from Generic import video, filedialogs
import matplotlib.pyplot as plt
import numpy as np

file = filedialogs.load_filename(directory="/media/data/Data")
vid = video.ReadVideo(file)
frames = vid.num_frames
freqs = jn.read_audio_file(file, frames)
d = np.round((freqs - 1000) / 15)
fig, ax = plt.subplots()
frames = list(range(frames))
ax.plot(frames, d)
ax.set_xlabel('frame')
ax.set_ylabel('Duty Cycle / 1000')
ax2 = ax.twinx()
ax2.plot(frames, freqs)
ax2.set_ylabel('Frequency (Hz)')
plt.show()
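The mapping d = (freqs - 1000) / 15 used above is linear, so a few example peak frequencies show the scale of the resulting duty-cycle values (the frequencies here are made up for illustration):

import numpy as np

freqs = np.array([1000.0, 1750.0, 8500.0])   # illustrative peak frequencies
d = (freqs - 1000) / 15                      # same conversion as read_audio_file
print(d)                                     # [  0.  50. 500.]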
Example #11
    functions. Technical report, Cornell University, 2004.
    """
    if preprocess:
        grayscale_img = images.bgr_2_grayscale(img)
        binary_img = adaptive_threshold(grayscale_img,
                                        block_size=block_size,
                                        constant=constant,
                                        mode=mode)
        # noise removal
        kernel = np.ones((3, 3), np.uint8)
        opening = cv2.morphologyEx(binary_img,
                                   cv2.MORPH_OPEN,
                                   kernel,
                                   iterations=2)
        # sure background area
        sure_bg = cv2.dilate(opening, kernel, iterations=3)
    else:
        binary_img = img
    dist_transform = cv2.distanceTransform(binary_img, cv2.DIST_L2, 5)
    return dist_transform


if __name__ == '__main__':
    from Generic import images
    from Generic import video

    read_vid = video.ReadVideo(
        '/media/ppzmis/data/ActiveMatter/bacteria_plastic/bacteria.avi')
    im = read_vid.read_next_frame()

    images.display(im, title='a')
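The core of the snippet above is cv2.distanceTransform, which needs a single-channel binary (uint8) input. A self-contained check on a synthetic image (the disc size is arbitrary):

import cv2
import numpy as np

binary = np.zeros((200, 200), np.uint8)
cv2.circle(binary, (100, 100), 40, 255, -1)         # filled white disc on black

# Each foreground pixel gets its Euclidean distance to the nearest zero pixel
dist = cv2.distanceTransform(binary, cv2.DIST_L2, 5)
cy, cx = np.unravel_index(np.argmax(dist), dist.shape)
print(cx, cy, dist.max())                           # peak at the disc centre, value ~ 40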
Example #12
import os

from tqdm import tqdm

from Generic import images, video, filedialogs

direc = filedialogs.open_directory()
files = filedialogs.get_files_directory(direc + '/*.MP4')

if not os.path.exists(direc + '/first_frames/'):
    os.mkdir(direc + '/first_frames')

i = True  # only define the crop region interactively for the first video
for file in tqdm(files):
    folder, name = os.path.split(file)
    name = name.split('.')[0]
    save_name = folder + '/first_frames/' + name + '.png'
    with video.ReadVideo(file) as vid:
        frame = vid.read_next_frame()
        if i:
            i = False
            cropper = images.InteractiveCrop(frame, 6)
            mask, crop, _, _ = cropper.begin_crop()
        frame = images.crop_and_mask_image(frame, crop, mask)
        images.save(frame, save_name)
Example #13
    d = (freqs - 1000) / 15
    return d


def frame_frequency(wave, frames, audio_rate):
    # Split the audio track into one chunk per video frame and take the
    # dominant frequency of each chunk.
    waves = np.array_split(wave, frames)
    freqs = np.array(
        [fourier_transform_peak(wav, 1 / audio_rate) for wav in waves])
    return freqs


def fourier_transform_peak(sig, time_step):
    # Zero-pad (or truncate) the signal to 48000 samples and return the
    # absolute frequency of the largest FFT component.
    ft = np.abs(np.fft.fft(sig, 48000))
    freq = np.fft.fftfreq(48000, time_step)
    peak = np.argmax(ft)
    return abs(freq[peak])


if __name__ == "__main__":
    from Generic import audio, video  # audio is needed for extract_wav below

    file = "/media/data/Data/July2019/RampsN29/15790002.MP4"
    cap = video.ReadVideo(file)
    num_frames = cap.num_frames
    cap.close()
    # %%
    d = read_audio_file(file, num_frames)

    # %%
    wav = audio.extract_wav(file)
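fourier_transform_peak can be sanity-checked with a synthetic tone; this snippet is illustrative only and reuses the function defined above with the 48 kHz rate it assumes:

import numpy as np

rate = 48000                                     # matches the FFT length above
t = np.arange(rate) / rate                       # one second of samples
tone = np.sin(2 * np.pi * 5200 * t)              # 5.2 kHz test tone
print(fourier_transform_peak(tone, 1 / rate))    # ~5200.0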