def get_radial_brightness_peaks(video_path, row, min_r=15, max_r=20):
    indice = int(row[0])
    frame_number = int(row[1])
    x = row[2]
    y = row[3]

    video = pims.Cine(video_path)
    frame = video.get_frame(frame_number - 1)

    # Select only the portion of the frame corresponding to the current particle (x, y)
    # and mask it so that only an annulus is visible (corresponding to the blades).
    outer_mask = createCircularMask(800, 1280, center=[x, y], radius=max_r)
    inner_mask = createCircularMask(800, 1280, center=[x, y], radius=min_r)
    frame = maskImage(frame, outer_mask)
    frame = maskImage(frame, ~inner_mask)

    df = pd.DataFrame(frame)
    df['y'] = df.index
    df = pd.melt(df, id_vars=['y'])
    df.rename(columns={'variable': 'x', 'value': 'brightness'}, inplace=True)
    df = df[df.brightness != 0]
    df['brightness'] *= (255 / frame.max())

    x_rel_to_center = df['x'] - x
    y_rel_to_center = df['y'] - y
    #df['angles'] = angle_from_2D_points(x_rel_to_center.astype(int).values, y_rel_to_center.astype(int).values)
    df['angles'] = angle_from_2D_points(
        x_rel_to_center.astype(float).values,
        y_rel_to_center.astype(float).values)
    df.sort_values('angles', inplace=True)

    angulos = df['angles'].values
    new_angulos = np.append(angulos, angulos[:100] + 360)
    brillo = savgol_filter(df['brightness'], window_length=21, polyorder=3)
    new_brillo = np.append(brillo, brillo[:100])

    picos_indice, picos_altura = find_peaks(new_brillo,
                                            distance=int(len(new_brillo) / 24.),
                                            width=5,
                                            prominence=5)
    picos = new_angulos[picos_indice]
    if picos[0] == 0.:
        picos = np.delete(picos, 0, 0)

    dips_indice, dips_altura = find_peaks(-new_brillo,
                                          distance=int(len(new_brillo) / 24.),
                                          width=5,
                                          prominence=5)
    dips = new_angulos[dips_indice]
    if dips[0] == 0.:
        dips = np.delete(dips, 0, 0)

    return (indice, [picos, dips])
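# Hedged usage sketch (not part of the original code). It assumes the helpers used
# above (createCircularMask, maskImage, angle_from_2D_points) are importable and that
# 'example.cine' is a valid Phantom .cine recording; both names are illustrative.
def _example_radial_peaks():
    # A row carries (particle id, frame number, x, y), matching the indexing above.
    row = [0, 1, 640.0, 400.0]  # hypothetical particle centred at (640, 400) px in frame 1
    idx, (peaks, dips) = get_radial_brightness_peaks('example.cine', row,
                                                     min_r=15, max_r=20)
    print(f'Particle {idx}: peak angles {peaks}, dip angles {dips} (degrees)')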
def alternative_findMeanRadius(videoPath, initialFrame=0, lastFrame='max',
                               thresh=20, opening_kernel=5):
    """ Finds mean radius from contour analysis """
    all_radiuses = np.array([])

    if lastFrame == 'max':
        # Find number of frames in the video
        v = pims.Cine(videoPath)
        lastFrame = len(v) - 1

    video = cv2.VideoCapture(videoPath)
    n = 1  # Simple counter to keep track of which frame we are on
    while video.isOpened():
        # Read the current frame and assign it to the variable `frame`
        frameExists, frame = video.read()
        if n < initialFrame + 1:
            n += 1
        elif n > lastFrame + 1:
            break
        else:
            # Detect circles for the current frame
            bw = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            _, binarized = cv2.threshold(bw, thresh, 255.0, cv2.THRESH_BINARY)
            opened = morphOperation(binarized, operation='opening', times=1,
                                    kernel_size=opening_kernel)
            closed = morphOperation(opened, operation='closing', times=1,
                                    kernel_size=10)
            contours, hierarchy = cv2.findContours(closed, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            radiuses = detectContourRadius(contours)
            all_radiuses = np.concatenate((all_radiuses, radiuses))
            n += 1
            printProgressBar(n, lastFrame + 2 - initialFrame,
                             prefix='Detecting radiuses:',
                             suffix='frames searched')

    # Close the video stream
    video.release()

    count = all_radiuses.shape[0]
    mean_radius = np.mean(all_radiuses)
    std_radius = np.std(all_radiuses)
    return count, mean_radius, std_radius
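# Hedged usage sketch (not from the original module): estimate the mean particle
# radius over the first 50 frames of a hypothetical 'example.cine' file. The function
# returns how many contours were measured plus the mean and standard deviation of
# their radii in pixels.
def _example_mean_radius():
    count, mean_r, std_r = alternative_findMeanRadius('example.cine',
                                                      initialFrame=0,
                                                      lastFrame=50,
                                                      thresh=20,
                                                      opening_kernel=5)
    print(f'{count} contours -> radius = {mean_r:.1f} +/- {std_r:.1f} px')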
def detectCirclesVideo(videoPath, initialFrame=0, lastFrame='max', thresh=20,
                       display_intermediate_steps=False, opening_kernel=5):
    if lastFrame == 'max':
        # Find number of frames in the video
        v = pims.Cine(videoPath)
        lastFrame = len(v) - 1

    # TODO: the 'size' column still needs to be filled in (e.g. with the mean
    # radius detected over the first 10 frames).
    # We first create an empty dataframe to store the circles in the correct format
    A = pd.DataFrame(np.zeros((1, 2), dtype=np.float64), index=('-1',),
                     columns=('x', 'y'))
    B = pd.DataFrame(np.full((1, 1), 0, dtype=np.int64), index=('-1',),
                     columns=('frame',))
    C = pd.DataFrame(np.full((1, 1), 0, dtype=np.float64), index=('-1',),
                     columns=('size',))
    circles_tp = pd.concat((A, C, B), axis=1)

    # =========================================================================
    # try:
    #     meanRadius = findMeanRadius(videoPath, n_frames=10)
    # except:
    #     meanRadius = 30
    # =========================================================================
    meanRadius = 29

    video = cv2.VideoCapture(videoPath)
    n = 1  # Simple counter to keep track of which frame we are on
    while video.isOpened():
        # Read the current frame and assign it to the variable `frame`
        frameExists, frame = video.read()
        if n < initialFrame + 1:
            n += 1
        elif n > lastFrame + 1:
            break
        else:
            # Detect circles for the current frame and append them to the general dataframe
            new_circles = alternative_detectCirclesImage(
                frame, frame_number=n, meanRadius=meanRadius,
                display_intermediate_steps=display_intermediate_steps,
                thresh=thresh)
            # =================================================================
            # new_circles = detectCircles_watershed(frame, frame_number=n, meanRadius=meanRadius,
            #                                       display_intermediate_steps=display_intermediate_steps,
            #                                       thresh=thresh)
            # =================================================================
            circles_tp = pd.concat((circles_tp, new_circles), axis=0)
            n += 1
            printProgressBar(n, lastFrame + 2 - initialFrame,
                             prefix='Detecting particles:',
                             suffix='frames searched')

    # Close the video stream
    video.release()

    # We delete the first row of circles_tp, since it was only used for
    # initialization and is no longer needed.
    circles_tp = circles_tp.drop('-1')
    # Reset the indexes
    circles_tp = circles_tp.reset_index(drop=True)

    return circles_tp
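# Hedged usage sketch (illustrative only): detect circles on the first 100 frames of a
# hypothetical 'example.cine' file and link them into trajectories with trackpy, using
# the same search range (5 px) and memory (0) as the processing pipeline further below.
def _example_detect_and_link():
    import trackpy as tp

    circles = detectCirclesVideo('example.cine', initialFrame=0, lastFrame=100,
                                 thresh=20, opening_kernel=5)
    traj = tp.link_df(circles, 5, memory=0)
    return traj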
SaveIMG = False

#%% Identify all the .cine files inside the test folder
start = timer()
os.chdir(TestsDir)
DirCont = os.listdir()
Idx = [
    i for i, s in enumerate(DirCont)
    if os.path.isfile(DirCont[i]) and '.cine' in DirCont[i]
]

for i in range(0, len(Idx)):
    # Read the video in .cine format:
    Vid = pims.Cine(DirCont[Idx[i]])
    # Transform it into a numpy array in order to work with the frames more easily:
    Vid1 = np.double(np.array(Vid))

end = timer()
print(end - start)

#%%
start = timer()

# Background calculation, averaging the first n frames without spray:
if SameBG and i == 0:
    b = np.mean(Vid1[br_i:br_e], axis=0)

if not os.path.exists('ConfigImages'):
def detect_particles_and_save_data(folder, file):
    # --- EXPERIMENTAL DETAILS ---
    vid = pims.Cine(file)
    original_file = file
    fps = vid.frame_rate
    shape = vid.frame_shape
    date = str(vid.frame_time_stamps[0][0])
    exposure = int(1000000 * vid.all_exposures[0])  # in microseconds
    n_frames = vid.image_count
    recording_time = n_frames / fps

    # Quick-and-dirty, temporary way of parsing N and power from the file name
    N = int(os.path.split(file)[1].split('_')[1].split('N')[1])
    power = int(os.path.split(file)[1].split('_')[2].split('p')[1])
    if power >= 100:
        power /= 10

    lights = 'luzLejana'
    camera_distance = 0.95  # in meters (balls / close camera: 0.535)
    particle_diameter_px = 79
    particle_diameter_m = 0.0725
    pixel_ratio = int(particle_diameter_px / particle_diameter_m)
    particle_shape = 'rotating disk'
    system_diameter = 0.725  # in meters
    packing_fraction = N * (particle_diameter_m / system_diameter)**2
    ROI_center = [656, 395]  # in pixels
    ROI_radius = 408

    # Hashing function to assign a unique id to each experiment;
    # date + time should be specific enough to tell them apart
    hash_object = hashlib.md5(date.encode())
    experiment_id = str(hash_object.hexdigest())

    # If this file has already been processed, do not work on it again
    if present_in_folder(experiment_id, folder):
        print('Experiment', experiment_id, 'already processed')
        return None  # Exit function

    if os.path.getsize(file) != 31976589832:
        print('Corrupted file, skipping')
        return None

    associated_code = os.path.join(folder, str(experiment_id) + '_code.zip')

    # Create a dictionary to store all these properties in a .txt file
    experiment_properties_dict = {}
    for i in ('experiment_id', 'original_file', 'date', 'shape', 'fps',
              'exposure', 'n_frames', 'recording_time', 'camera_distance',
              'pixel_ratio', 'particle_diameter_px', 'N', 'particle_shape',
              'particle_diameter_m', 'system_diameter', 'packing_fraction',
              'lights', 'power', 'associated_code', 'ROI_center', 'ROI_radius'):
        experiment_properties_dict[i] = locals()[i]

    # Save to a file
    with open(
            os.path.join(folder, str(experiment_id) + '_experiment_info.txt'),
            'w') as f:
        json.dump(experiment_properties_dict, f, indent=0)

    # =========================================================================
    # # Finally we want to freeze all the code used to track and process data
    # # into a single .zip file
    # codefiles = ['__init__.py',
    #              'detect_blobs.py',
    #              'calculateVelocity.py',
    #              'utils.py',
    #              'graphics.py',
    #              'export_santos_format.py',
    #              'stats.py',
    #              'analysis.py']
    # with ZipFile(associated_code, 'w') as myzip:
    #     for f in codefiles:
    #         myzip.write(os.path.join('D:/particleTracking/', f), arcname=f)
    # =========================================================================

    # --- PARTICLE TRACKING ---

    # DEFAULT DETECTION PARAMETERS
    opening_kernel = 5  # Size of the kernel for getting rid of spurious features.
    # Careful: large values affect the static measuring error.
    thresh = 20  # Threshold for binarization; should increase with exposure

    # General calibration, accounting for exposure
    if exposure == 300:
        thresh = 20
    elif exposure == 1000:
        thresh = 30
    elif exposure == 1500:
        thresh = 30
    elif exposure == 2500:
        thresh = 45

    # Specific calibration
    if 'CamaraCercana' not in file:
        opening_kernel = 20
    if ('CamaraCercana' not in file) and ('exposicion300' in file):
        thresh = 18
        opening_kernel = 25
    if 'Foco' in file:
        thresh += 5
        opening_kernel = 25

    # --- CIRCLE DETECTION ---
    print('Gets this far')
    circles = detectCirclesVideo(file,
                                 thresh=thresh,
                                 display_intermediate_steps=False,
                                 opening_kernel=opening_kernel,
                                 ROI_center=ROI_center,
                                 ROI_radius=ROI_radius)

    # Sometimes non-existent particles are detected very close to the edges of the
    # system (because of glare or reflections). We therefore delete everything whose
    # center lies within n pixels of the edge, where n is the particle radius minus
    # a couple of pixels. This step is unrelated to the ROI step; here n is 15.
    circles = createCircularROI(circles, ROI_center, ROI_radius - 12)

    # TRAJECTORY LINKING
    traj = tp.link_df(circles, 5, memory=0)
    traj = reorder_rename_dataFrame(traj)  # Always run after trackpy

    # VELOCITY DERIVATION
    vels = alternative_calculate_velocities(traj, n=1, use_gradient=False)
    vels = reset_track_indexes(vels)  # Always run after deleting traj or calculate_vels; this fills voids

    # SAVING RAW DATA
    circles.to_pickle(os.path.join(folder, str(experiment_id) + '_raw_data.pkl'),
                      compression='xz')
    traj.to_pickle(os.path.join(folder, str(experiment_id) + '_raw_trajectories.pkl'),
                   compression='xz')
    vels.to_pickle(os.path.join(folder, str(experiment_id) + '_raw_velocities.pkl'),
                   compression='xz')

    # CREATION OF REGION OF INTEREST (circular)
    roi_data = createCircularROI(circles, ROI_center, ROI_radius)
    roi_traj = tp.link_df(roi_data, 5, memory=0)
    roi_traj = reorder_rename_dataFrame(roi_traj)  # Always run after trackpy
    roi_traj = reset_track_indexes(roi_traj)  # Always run after deleting traj or calculate_vels; this fills voids

    # DERIVE VELOCITIES
    roi_vels = alternative_calculate_velocities(roi_traj, n=1, use_gradient=False)

    # DELETING SHORT TRAJECTORIES
    roi_vels = alternative_delete_short_trajectories(roi_vels, minimumFrames=10)
    roi_vels = reset_track_indexes(roi_vels)  # Always run after deleting traj or calculate_vels; this fills voids
    # roi_vels = deleteShortTrajectories(roi_vels, minimumFrames=10)

    # SAVING DATA
    roi_data.to_pickle(os.path.join(folder, str(experiment_id) + '_roi_data.pkl'),
                       compression='xz')
    roi_traj.to_pickle(os.path.join(folder, str(experiment_id) + '_roi_trajectories.pkl'),
                       compression='xz')
    roi_vels.to_pickle(os.path.join(folder, str(experiment_id) + '_roi_velocities.pkl'),
                       compression='xz')

    # SAVING DATA IN 'SANTOS' FORMAT
    roi_traj.to_csv(os.path.join(folder, str(experiment_id) + '_pos_vel_ppp.dat'),
                    sep='\t', header=True, index=False)
    if 'x' not in f:
        filtered.append(f)
files = filtered

# First, we will extract positions and velocities from the .cine files.
# Partial function that only accepts a file as argument:
func = partial(detect_particles_and_save_data, folder)

print('Processing videos, extracting positions \n')
with mp.Pool(processes=N_CORES) as pool:
    pool.map(func, files)

# Then, from those same .cine files, we calculate the angular velocity
# of their particles
for file in files:
    vid = pims.Cine(file)
    date = str(vid.frame_time_stamps[0][0])
    hash_object = hashlib.md5(date.encode())
    experiment_id = str(hash_object.hexdigest())

    print(
        f'Calculating angular velocities for file: {file}, with id: {experiment_id}'
    )

    data_file = os.path.join(folder, str(experiment_id) + '_raw_trajectories.pkl')

    # Detect maxima and minima, save those to a file
    extreme_points = detect_brightness_maxima(file, data_file)
    extreme_points.to_pickle(os.path.join(
        folder, str(experiment_id) + '_extremos.pkl'),
def nstx_gpi_get_data(exp_id=None, data_name=None, no_data=False, options=None,
                      coordinates=None, data_source=None):
    # Translate the input variables to the actual directory name on the portal,
    # copy the file from the portal to the PC if it is not already there,
    # then interpret the .cin file:
    #   read the header
    #   read the data
    if (exp_id is None):
        raise ValueError('exp_id should be set for NSTX GPI.')
    if (type(exp_id) is not int):
        raise TypeError("exp_id should be an integer and not %s" % (type(exp_id)))

    default_options = {
        'Local datapath': 'data',
        'Datapath': None,
        'Scaling': 'Digit',
        'Offset timerange': None,
        'Calibration': False,
        'Calib. path': 'cal',
        'Calib. file': None,
        'Phase': None,
        'State': None,
        'Start delay': 0,
        'End delay': 0,
        'Download only': False
    }
    _options = flap.config.merge_options(default_options, options,
                                         data_source='NSTX_GPI')

    # Folder decoder
    folder = {
        '_0_': 'Phantom71-5040',
        '_1_': 'Phantom710-9206',
        '_2_': 'Phantom73-6747',
        '_3_': 'Phantom73-6663',
        '_4_': 'Phantom73-8032',
        '_5_': 'Phantom710-9205',
        '_6_': 'Miro4-9373'
    }
    data_title = 'NSTX GPI data'

    if (exp_id < 118929):
        year = 2005
    if (exp_id >= 118929) and (exp_id < 122270):
        year = 2006
    if (exp_id >= 122270) and (exp_id < 126511):
        year = 2007
    if (exp_id >= 126511) and (exp_id < 131565):
        year = 2008
    if (exp_id >= 131565) and (exp_id < 137110):
        year = 2009
    if (exp_id >= 137110):
        year = 2010

    if (year < 2006):
        cam = '_0_'
    if (year == 2007 or year == 2008):
        cam = '_1_'
    if (year == 2009):
        cam = '_2_'
    if (year == 2010):
        cam = '_5_'

    if (year < 2006):
        file_name = 'nstx' + str(exp_id) + '.cin'
    else:
        file_name = 'nstx' + cam + str(exp_id) + '.cin'

    file_folder = _options['Datapath'] + '/' + folder[cam] + \
                  '/' + str(year) + '/'
    remote_file_name = file_folder + file_name
    local_file_folder = _options['Local datapath'] + '/' + str(exp_id) + '/'
    if not os.path.exists(_options['Local datapath']):
        raise SystemError("The local datapath cannot be found.")

    if not (os.path.exists(local_file_folder + file_name)):
        if not (os.path.exists(local_file_folder)):
            try:
                os.mkdir(local_file_folder)
            except:
                # Fall back to the scratch folder instead of aborting, so the
                # download below can still proceed.
                print("The folder cannot be created. Dumping the file to scratch.")
                local_file_folder = _options['Local datapath'] + '/scratch'

        p = subprocess.Popen([
            "scp",
            _options['User'] + "@" + _options['Server'] + ':' + remote_file_name,
            local_file_folder
        ])
        os.waitpid(p.pid, 0)
        if not (os.path.exists(local_file_folder + file_name)):
            raise SystemError(
                "The file couldn't be transferred to the local directory.")

    if (_options['Download only']):
        d = flap.DataObject(data_array=np.asarray([0, 1]),
                            data_unit=None,
                            coordinates=None,
                            exp_id=exp_id,
                            data_title=data_title,
                            info={'Options': _options},
                            data_source="NSTX_GPI")
        return d

    images = pims.Cine(local_file_folder + file_name)
    data_arr = np.flip(
        np.asarray(images[:], dtype=np.int16),
        2)  # The original data is 80x64; this line converts it to 64x80
    data_unit = flap.Unit(name='Signal', unit='Digit')

    # The header dict contains the capture information along with the total image
    # number and first_image_no (when the recording started).
    # The frame_rate corresponds with the one from IDL.
    trigger_time = images.header_dict['first_image_no'] / images.frame_rate

    coord = [None] * 6
    coord[0] = (copy.deepcopy(
        flap.Coordinate(
            name='Time',
            unit='s',
            mode=flap.CoordinateMode(equidistant=True),
            start=trigger_time,
            step=1 / float(images.frame_rate),
            #shape=time_arr.shape,
            dimension_list=[0])))
    coord[1] = (copy.deepcopy(
        flap.Coordinate(name='Sample',
                        unit='n.a.',
                        mode=flap.CoordinateMode(equidistant=True),
                        start=0,
                        step=1,
                        dimension_list=[0])))
    coord[2] = (copy.deepcopy(
        flap.Coordinate(name='Image x',
                        unit='Pixel',
                        mode=flap.CoordinateMode(equidistant=True),
                        start=0,
                        step=1,
                        shape=[],
                        dimension_list=[1])))
    coord[3] = (copy.deepcopy(
        flap.Coordinate(name='Image y',
                        unit='Pixel',
                        mode=flap.CoordinateMode(equidistant=True),
                        start=0,
                        step=1,
                        shape=[],
                        dimension_list=[2])))

    # Get the spatial calibration for the GPI data.
    # This spatial calibration is based on rz_map.dat, which used a linear
    # approximation for the transformation between pixel and spatial coordinates.
    # This needs to be updated as soon as more information is available on the
    # calibration coordinates.
    coeff_r = np.asarray([3.7183594, -0.77821046, 1402.8097]) / 1000.  # The coordinates are in meters
    coeff_z = np.asarray([0.18090118, 3.0657776, 70.544312]) / 1000.  # The coordinates are in meters

    # This part is not producing appropriate results due to the equidistant spacing
    # and double coefficients. Slicing is only possible for single steps.
    # coord[4] = (copy.deepcopy(flap.Coordinate(name='Device R',
    #                                           unit='m',
    #                                           mode=flap.CoordinateMode(equidistant=True),
    #                                           start=coeff_r[2],
    #                                           step=[coeff_r[0], coeff_r[1]],
    #                                           dimension_list=[1, 2])))
    # coord[5] = (copy.deepcopy(flap.Coordinate(name='Device z',
    #                                           unit='m',
    #                                           mode=flap.CoordinateMode(equidistant=True),
    #                                           start=coeff_z[2],
    #                                           step=[coeff_z[0], coeff_z[1]],
    #                                           dimension_list=[1, 2])))

    r_coordinates = np.zeros([64, 80])
    z_coordinates = np.zeros([64, 80])
    for i_x in range(64):
        for i_y in range(80):
            r_coordinates[i_x, i_y] = coeff_r[0] * i_x + coeff_r[1] * i_y + coeff_r[2]
            z_coordinates[i_x, i_y] = coeff_z[0] * i_x + coeff_z[1] * i_y + coeff_z[2]

    coord[4] = (copy.deepcopy(
        flap.Coordinate(name='Device R',
                        unit='m',
                        mode=flap.CoordinateMode(equidistant=False),
                        values=r_coordinates,
                        shape=r_coordinates.shape,
                        dimension_list=[1, 2])))
    coord[5] = (copy.deepcopy(
        flap.Coordinate(name='Device z',
                        unit='m',
                        mode=flap.CoordinateMode(equidistant=False),
                        values=z_coordinates,
                        shape=z_coordinates.shape,
                        dimension_list=[1, 2])))

    _options["Trigger time [s]"] = trigger_time
    _options["FPS"] = images.frame_rate
    _options["Sample time [s]"] = 1 / float(images.frame_rate)
    _options["Exposure time [s]"] = images.tagged_blocks['exposure_only'][0]
    _options["X size"] = images.frame_shape[0]
    _options["Y size"] = images.frame_shape[1]
    _options["Bits"] = images.bitmapinfo_dict["bi_bit_count"]

    d = flap.DataObject(data_array=data_arr,
                        data_unit=data_unit,
                        coordinates=coord,
                        exp_id=exp_id,
                        data_title=data_title,
                        info={'Options': _options},
                        data_source="NSTX_GPI")
    return d
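# Hedged usage example (not part of the module): read a GPI movie for an illustrative
# shot number. It assumes the 'Datapath'/'Local datapath' options point to valid
# locations and that 'User'/'Server' are configured if the .cin file still has to be
# copied from the portal; the path and shot number below are placeholders.
if __name__ == '__main__':
    d = nstx_gpi_get_data(exp_id=139901,
                          options={'Datapath': '/path/to/portal/gpi',
                                   'Local datapath': 'data'})
    print(d.data.shape)  # (n_frames, rows, columns) of raw camera counts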