def __init__(self, projectID=None, modelID=None, workers=None, summaryFile=None):
    """Store run parameters and build the project-scoped FileManager.

    Args:
        projectID: project to operate on; None means "no specific project"
            (validation is skipped in that case — see _checkProjectID).
        modelID: model identifier forwarded to the FileManager.
        workers: worker count kept for later analysis steps.
        summaryFile: summary-file name forwarded to the FileManager.

    Raises:
        Exception: if projectID is given but not found in the cloud project
            listing (per _checkProjectID).
    """
    self.projectID = projectID
    # FM is the project FileManager helper class — presumably resolves local/cloud paths; TODO confirm.
    self.fileManager = FM(projectID=projectID, modelID=modelID, summaryFile=summaryFile)
    self.modelID = modelID
    if not self._checkProjectID():
        # NOTE(review): projectID is non-None on this path (a None projectID passes the check),
        # so the string concatenation below is safe.
        raise Exception(projectID + ' is not valid.')
    self.workers = workers
def __init__(self):
    """Initialize the Kinect-based tracker: detect hardware, sync credentials,
    connect to the Controller Google Spreadsheet, start the PiCamera, and then
    block forever in monitorCommands().

    NOTE(review): this constructor never returns under normal operation —
    monitorCommands() loops indefinitely.
    """
    # 1: Define valid commands and ignore warnings
    self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete', 'Snapshots']
    np.seterr(invalid='ignore')
    # 2: Determine which Kinect is attached (This script can handle v1 or v2 Kinects)
    self._identifyDevice()  # Stored in self.device
    self.system = platform.node()
    # 3: Create file manager
    self.fileManager = FM()
    # 4: Download credential files
    self.fileManager.downloadData(self.fileManager.localCredentialSpreadsheet)
    self.fileManager.downloadData(self.fileManager.localCredentialDrive)
    self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable
    # 5: Connect to Google Spreadsheets
    self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
    self._modifyPiGS(error='')  # Clear any stale error message on the status sheet
    # 6: Start PiCamera
    self.camera = PiCamera()
    self.camera.resolution = (1296, 972)
    self.camera.framerate = 30
    # NOTE(review): piCamera is the string 'True', not a bool — downstream code
    # compares it against logfile text, so it is truthy either way.
    self.piCamera = 'True'
    # 7: Keep track of processes spawned to convert and upload videofiles
    self.processes = []
    # 8: Set size of frame (keep an existing ROI if one was already set)
    try:
        self.r
    except AttributeError:
        self.r = (0, 0, 640, 480)
    # 9: Await instructions
    self.monitorCommands()
def __init__(self, all_data): self.all_data = all_data # Flag to keep all data if desired # 1: Define valid commands and ignore warnings self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete'] np.seterr(invalid='ignore') # 2: Determine which depth sensor is attached (This script can handle DepthSense cameras) self._identifyDevice() #Stored in self.device self.system = platform.node() # 3: Create file manager self.fileManager = FM() # 4: Start PiCamera self.camera = PiCamera() self.camera.resolution = (1296, 972) self.camera.framerate = 30 self.piCamera = 'True' # 5: Download credential files self.fileManager.downloadData(self.fileManager.localCredentialDir) self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet # Rename to make code readable self._authenticateGoogleSpreadSheets() #Creates self.controllerGS self._identifyTank() #Stored in self.tankID self._identifyServiceAccount() # 6: Keep track of processes spawned to convert and upload videofiles self.processes = [] # 7: Set size of frame try: self.r except AttributeError: self.r = (0,0,640,480) # 9: Await instructions print('Monitoring commands') self.monitorCommands()
import argparse, subprocess, pdb, shutil, os import pandas as pd from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM parser = argparse.ArgumentParser(description='This script is used to manually prepared projects for downstream analysis') parser.add_argument('--SummaryFile', type = str, help = 'Restrict analysis to projectIDs specified in csv file, which will be rewritten. ProjectIDs must be found in a column called projectID') parser.add_argument('--Start', type = int) parser.add_argument('--Total', type = int) args = parser.parse_args() fm_obj = FM() if args.SummaryFile is not None: summary_file = fm_obj.localAnalysisStatesDir + args.SummaryFile fm_obj.downloadData(summary_file) dt = pd.read_csv(summary_file, index_col = False) projectIDs = list(dt.projectID) if args.Start is not None: projectIDs = projectIDs[args.Start: args.Start + args.Total] else: projectIDs = fm_obj.getAllProjectIDs() for projectID in projectIDs: fm_obj = FM(projectID = projectID) print(projectID) lp = fm_obj.lp main_directory_data = subprocess.run(['rclone', 'lsf', 'cichlidVideo:McGrath/Apps/CichlidPiData/' + '__ProjectData/' + projectID + '/'], capture_output = True, encoding = 'utf-8').stdout.split('\n') for bad_data in ['AllClips.tar', 'MLClips.tar', 'MLFrames.tar', 'Backgrounds.tar']: if bad_data in main_directory_data:
parser.add_argument('-i', '--Initials', required=True, type=str, help='Initials to save annotations') args = parser.parse_args() numbers = {} # Identify projects to run analysis on if args.ProjectIDs is not None: projectIDs = args.ProjectIDs # Specified at the command line for projectID in projectIDs: numbers[projectID] = args.Number else: fm_obj = FM() summary_file = fm_obj.localAnalysisStatesDir + args.SummaryFile fm_obj.downloadData(summary_file) dt = pd.read_csv(summary_file, index_col=False) projectIDs = list( dt.projectID) # Only run analysis on projects that need it for projectID in projectIDs: if dt.loc[dt.projectID == projectID]['Labeled' + args.DataType] > 0: numbers[projectID] = dt.loc[dt.projectID == projectID][ 'Labeled' + args.DataType] # To run analysis efficiently, we download and upload data in the background while the main script runs for projectID, number in numbers.items(): print('Downloading: ' + projectID + ' ' + str(datetime.datetime.now()))
class CichlidTracker:
    """Long-running controller for a Raspberry Pi tracking rig.

    Polls the 'Controller' Google Spreadsheet ('RaspberryPi' worksheet) for
    commands, records PiCamera video and depth frames (Kinect v1 or RealSense),
    logs everything through a logfile, and uploads results via the FileManager.

    NOTE(review): several except clauses are bare; they deliberately retry or
    ignore transient Google/hardware errors, at the cost of hiding real bugs.
    """

    def __init__(self):
        """Detect hardware, sync credentials, connect to Google Sheets, start
        the PiCamera, then block forever in monitorCommands()."""
        # 1: Define valid commands and ignore warnings
        self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete', 'Snapshots']
        np.seterr(invalid='ignore')
        # 2: Determine which Kinect is attached (This script can handle v1 or v2 Kinects)
        self._identifyDevice()  # Stored in self.device
        self.system = platform.node()
        # 3: Create file manager
        self.fileManager = FM()
        # 4: Download credential files
        self.fileManager.downloadData(self.fileManager.localCredentialSpreadsheet)
        self.fileManager.downloadData(self.fileManager.localCredentialDrive)
        self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable
        # 5: Connect to Google Spreadsheets
        self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
        self._modifyPiGS(error='')
        # 6: Start PiCamera
        self.camera = PiCamera()
        self.camera.resolution = (1296, 972)
        self.camera.framerate = 30
        self.piCamera = 'True'  # NOTE(review): string flag, compared against logfile text on Restart
        # 7: Keep track of processes spawned to convert and upload videofiles
        self.processes = []
        # 8: Set size of frame (keep an existing ROI if one was already set)
        try:
            self.r
        except AttributeError:
            self.r = (0, 0, 640, 480)
        # 9: Await instructions
        self.monitorCommands()

    def __del__(self):
        """Best-effort teardown: mark the sheet stopped, stop camera/Kinect, close files."""
        # Try to close out files and stop running Kinects
        self._modifyPiGS(command='None', status='Stopped', error='UnknownError')
        if self.piCamera:
            if self.camera.recording:
                self.camera.stop_recording()
                self._print('PiCameraStopped: Time=' + str(datetime.datetime.now()) + ', File=Videos/' + str(self.videoCounter).zfill(4) + "_vid.h264")
        try:
            # NOTE(review): 'kinect2' is handled here but never set by
            # _identifyDevice in this file — presumably legacy; confirm.
            if self.device == 'kinect2':
                self.K2device.stop()
            if self.device == 'kinect':
                freenect.sync_stop()
                freenect.shutdown(self.a)
        except AttributeError:
            pass
        self._closeFiles()

    def monitorCommands(self, delta=10):
        """Poll the Controller spreadsheet every `delta` seconds and dispatch commands.

        This function checks the master Controller Google Spreadsheet to
        determine if a command was issued (delta = seconds to recheck).
        Never returns.
        """
        while True:
            self._identifyTank()  # Stored in self.tankID
            command, projectID = self._returnCommand()
            if projectID in ['', 'None']:
                self._reinstructError('ProjectID must be set')
                time.sleep(delta)
                continue
            if command != 'None':
                print(command + '\t' + projectID)
                self.fileManager.createProjectData(projectID)
                self.runCommand(command, projectID)
            self._modifyPiGS(status='AwaitingCommand')
            time.sleep(delta)

    def runCommand(self, command, projectID):
        """Execute one command from the Controller spreadsheet.

        This function is used to run a specific command found in the master
        Controller Google Spreadsheet. 'Stop'/'UploadData'/'LocalDelete'
        return early; 'New'/'Rewrite'/'Restart' fall through into the
        capture pipeline (_start_kinect → _diagnose_speed → captureFrames).
        """
        self.projectID = projectID
        # Rename files to make code more readable
        self.projectDirectory = self.fileManager.localProjectDir
        self.loggerFile = self.fileManager.localLogfile
        self.frameDirectory = self.fileManager.localFrameDir
        self.videoDirectory = self.fileManager.localVideoDir
        self.backupDirectory = self.fileManager.localBackupDir

        if command not in self.commands:
            self._reinstructError(command + ' is not a valid command. Options are ' + str(self.commands))

        if command == 'Stop':
            if self.piCamera:
                if self.camera.recording:
                    self.camera.stop_recording()
                    self._print('PiCameraStopped: Time: ' + str(datetime.datetime.now()) + ',,File: Videos/' + str(self.videoCounter).zfill(4) + "_vid.h264")
                    # Spawn background conversion/upload of the final video file
                    command = ['python3', 'unit_scripts/process_video.py', self.videoDirectory + str(self.videoCounter).zfill(4) + '_vid.h264']
                    command += [str(self.camera.framerate[0]), self.projectID]
                    self._print(command)
                    self.processes.append(subprocess.Popen(command))
            try:
                if self.device == 'kinect2':
                    self.K2device.stop()
                if self.device == 'kinect':
                    freenect.sync_stop()
                    freenect.shutdown(self.a)
            except:
                self._print('ErrorStopping kinect')
            self._closeFiles()
            self._modifyPiGS(command='None', status='AwaitingCommand')
            return

        if command == 'UploadData':
            self._modifyPiGS(command='None')
            self._uploadFiles()
            return

        if command == 'LocalDelete':
            if os.path.exists(self.projectDirectory):
                shutil.rmtree(self.projectDirectory)
            self._modifyPiGS(command='None', status='AwaitingCommand')
            return

        self._modifyPiGS(command='None', status='Running', error='')

        if command == 'New':
            # Project Directory should not exist. If it does, report error
            if os.path.exists(self.projectDirectory):
                self._reinstructError('New command cannot be run if ouput directory already exists. Use Rewrite or Restart')

        if command == 'Rewrite':
            if os.path.exists(self.projectDirectory):
                shutil.rmtree(self.projectDirectory)
            os.makedirs(self.projectDirectory)

        if command in ['New', 'Rewrite']:
            self.masterStart = datetime.datetime.now()
            if command == 'New':
                os.makedirs(self.projectDirectory)
            os.makedirs(self.frameDirectory)
            os.makedirs(self.videoDirectory)
            os.makedirs(self.backupDirectory)
            #self._createDropboxFolders()
            self.frameCounter = 1
            self.videoCounter = 1

        if command == 'Restart':
            # Resume counters and start time from the existing logfile
            logObj = LP(self.loggerFile)
            self.masterStart = logObj.master_start
            #self.r = logObj.bounding_shape
            self.frameCounter = logObj.lastFrameCounter + 1
            self.videoCounter = logObj.lastVideoCounter + 1
            if self.system != logObj.system or self.device != logObj.device or self.piCamera != logObj.camera:
                self._reinstructError('Restart error. System, device, or camera does not match what is in logfile')

        self.lf = open(self.loggerFile, 'a')
        self._modifyPiGS(start=str(self.masterStart))

        if command in ['New', 'Rewrite']:
            self._print('MasterStart: System: ' + self.system + ',,Device: ' + self.device + ',,Camera: ' + str(self.piCamera) + ',,Uname: ' + str(platform.uname()) + ',,TankID: ' + self.tankID + ',,ProjectID: ' + self.projectID)
            self._print('MasterRecordInitialStart: Time: ' + str(self.masterStart))
            self._print('PrepFiles: FirstDepth: PrepFiles/FirstDepth.npy,,LastDepth: PrepFiles/LastDepth.npy,,PiCameraRGB: PiCameraRGB.jpg,,DepthRGB: DepthRGB.jpg')
            # Record every camera setting so the run is reproducible from the log
            picamera_settings = {'AnalogGain': str(self.camera.analog_gain), 'AWB_Gains': str(self.camera.awb_gains),
                                 'AWB_Mode': str(self.camera.awb_mode), 'Brightness': str(self.camera.brightness),
                                 'ClockMode': str(self.camera.clock_mode), 'Contrast': str(self.camera.contrast),
                                 'Crop': str(self.camera.crop), 'DigitalGain': str(self.camera.digital_gain),
                                 'ExposureCompensation': str(self.camera.exposure_compensation),
                                 'ExposureMode': str(self.camera.exposure_mode), 'ExposureSpeed': str(self.camera.exposure_speed),
                                 'FrameRate': str(self.camera.framerate), 'ImageDenoise': str(self.camera.image_denoise),
                                 'MeterMode': str(self.camera.meter_mode), 'RawFormat': str(self.camera.raw_format),
                                 'Resolution': str(self.camera.resolution), 'Saturation': str(self.camera.saturation),
                                 'SensorMode': str(self.camera.sensor_mode), 'Sharpness': str(self.camera.sharpness),
                                 'ShutterSpeed': str(self.camera.shutter_speed), 'VideoDenoise': str(self.camera.video_denoise),
                                 'VideoStabilization': str(self.camera.video_stabilization)}
            self._print('PiCameraSettings: ' + ',,'.join([x + ': ' + picamera_settings[x] for x in sorted(picamera_settings.keys())]))
            #self._createROI(useROI = False)
        else:
            self._print('MasterRecordRestart: Time: ' + str(datetime.datetime.now()))

        # Start kinect
        self._start_kinect()
        # Diagnose speed
        self._diagnose_speed()
        # Capture data
        self.captureFrames()

    def captureFrames(self, frame_delta=5, background_delta=5, max_frames=20, stdev_threshold=20):
        """Main capture loop: manage day-time video recording and periodic depth frames.

        frame_delta / background_delta are in minutes. Loops until the
        spreadsheet issues a command other than 'None'/'Snapshots'.
        """
        current_background_time = datetime.datetime.now()
        current_frame_time = current_background_time + datetime.timedelta(seconds=60 * frame_delta)
        command = ''
        while True:
            self._modifyPiGS(command='None', status='Running', error='')
            # Grab new time
            now = datetime.datetime.now()
            # Fix camera if it needs to be (start during daytime, stop at night)
            if self.piCamera:
                if self._video_recording() and not self.camera.recording:
                    self.camera.capture(self.videoDirectory + str(self.videoCounter).zfill(4) + "_pic.jpg")
                    self.camera.start_recording(self.videoDirectory + str(self.videoCounter).zfill(4) + "_vid.h264", bitrate=7500000)
                    self._print('PiCameraStarted: FrameRate: ' + str(self.camera.framerate) + ',,Resolution: ' + str(self.camera.resolution) + ',,Time: ' + str(datetime.datetime.now()) + ',,VideoFile: Videos/' + str(self.videoCounter).zfill(4) + '_vid.h264,,PicFile: Videos/' + str(self.videoCounter).zfill(4) + '_pic.jpg')
                elif not self._video_recording() and self.camera.recording:
                    self.camera.stop_recording()
                    self._print('PiCameraStopped: Time: ' + str(datetime.datetime.now()) + ',, File: Videos/' + str(self.videoCounter).zfill(4) + "_vid.h264")
                    #self._print(['rclone', 'copy', self.videoDirectory + str(self.videoCounter).zfill(4) + "_vid.h264"])
                    # NOTE(review): this rebinds the loop variable `command`
                    # (normally the spreadsheet command string) to the Popen arg list.
                    command = ['python3', 'unit_scripts/process_video.py', self.videoDirectory + str(self.videoCounter).zfill(4) + '_vid.h264']
                    command += [str(self.camera.framerate[0]), self.projectID]
                    self._print(command)
                    self.processes.append(subprocess.Popen(command))
                    self.videoCounter += 1
            # Capture a frame and background if necessary
            if now > current_background_time:
                if command == 'Snapshots':
                    out = self._captureFrame(current_frame_time, max_frames=max_frames, stdev_threshold=stdev_threshold, snapshots=True)
                else:
                    out = self._captureFrame(current_frame_time, max_frames=max_frames, stdev_threshold=stdev_threshold)
                if out is not None:
                    current_background_time += datetime.timedelta(seconds=60 * background_delta)
                subprocess.Popen(['python3', 'unit_scripts/drive_updater.py', self.loggerFile])
            else:
                if command == 'Snapshots':
                    out = self._captureFrame(current_frame_time, max_frames=max_frames, stdev_threshold=stdev_threshold, snapshots=True)
                else:
                    out = self._captureFrame(current_frame_time, max_frames=max_frames, stdev_threshold=stdev_threshold)
            current_frame_time += datetime.timedelta(seconds=60 * frame_delta)
            self._modifyPiGS(status='Running')
            # Check google doc to determine if recording has changed.
            try:
                command, projectID = self._returnCommand()
            except KeyError:
                continue
            if command != 'None':
                if command == 'Snapshots':
                    self._modifyPiGS(command='None', status='Writing Snapshots')
                    continue
                else:
                    break
            else:
                self._modifyPiGS(error='')

    def _authenticateGoogleSpreadSheets(self):
        """Authorize gspread and make sure this Pi has a row in the 'RaspberryPi' worksheet.

        Returns True on success (row found or newly appended), False after
        three failed attempts. Sets self.controllerGS.
        """
        scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/spreadsheets"]
        credentials = ServiceAccountCredentials.from_json_keyfile_name(self.credentialSpreadsheet, scope)
        for i in range(0, 3):  # Try to autheticate three times before failing
            try:
                gs = gspread.authorize(credentials)
            except:
                continue
            try:
                self.controllerGS = gs.open('Controller')
                pi_ws = self.controllerGS.worksheet('RaspberryPi')
            except:
                continue
            try:
                headers = pi_ws.row_values(1)
            except:
                continue
            column = headers.index('RaspberryPiID') + 1
            try:
                pi_ws.col_values(column).index(platform.node())
                return True
            except ValueError:
                # This Pi is not registered yet — determine our LAN IP and append a row
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.connect(("8.8.8.8", 80))  # No traffic sent; just resolves the outbound interface
                ip = s.getsockname()[0]
                s.close()
                try:
                    pi_ws.append_row([platform.node(), ip, '', '', '', '', '', 'None', 'Stopped', 'Error: Awaiting assignment of TankID', str(datetime.datetime.now())])
                except:
                    continue
                return True
            except:
                continue
            time.sleep(2)
        return False

    def _identifyDevice(self):
        """Probe for a Kinect v1 (freenect) or RealSense (pyrealsense2) device.

        Sets self.device to 'kinect' or 'realsense'; aborts via _initError on
        zero or ambiguous hardware.
        """
        try:
            global freenect
            import freenect
            self.a = freenect.init()
            if freenect.num_devices(self.a) == 0:
                kinect = False
            elif freenect.num_devices(self.a) > 1:
                self._initError('Multiple Kinect1s attached. Unsure how to handle')
            else:
                kinect = True
        except ImportError:
            kinect = False
        try:
            global rs
            import pyrealsense2 as rs
            ctx = rs.context()
            # NOTE(review): these two ifs are not chained — with exactly one
            # device the first if is False and the else sets realsense = True;
            # with zero devices realsense is set False then immediately
            # overwritten True by the else of the second if. Confirm intent.
            if len(ctx.devices) == 0:
                realsense = False
            if len(ctx.devices) > 1:
                self._initError('Multiple RealSense devices attached. Unsure how to handle')
            else:
                realsense = True
        except ImportError:
            realsense = False
        if kinect and realsense:
            self._initError('Kinect1 and RealSense devices attached. Unsure how to handle')
        elif not kinect and not realsense:
            self._initError('No depth sensor attached')
        elif kinect:
            self.device = 'kinect'
        else:
            self.device = 'realsense'

    def _identifyTank(self):
        """Block until this Pi's row in the spreadsheet has a TankID; store it in self.tankID."""
        while True:
            self._authenticateGoogleSpreadSheets()  # link to google drive spreadsheet stored in self.controllerGS
            pi_ws = self.controllerGS.worksheet('RaspberryPi')
            headers = pi_ws.row_values(1)
            raPiID_col = headers.index('RaspberryPiID') + 1
            for i in range(5):
                try:
                    row = pi_ws.col_values(raPiID_col).index(platform.node()) + 1
                    break
                except:
                    continue
            # No +1 here: col indexes into the row_values list, not a worksheet column
            col = headers.index('TankID')
            if pi_ws.row_values(row)[col] not in ['None', '']:
                self.tankID = pi_ws.row_values(row)[col]
                for i in range(5):
                    try:
                        self._modifyPiGS(capability='Device=' + self.device + ',Camera=' + str(self.piCamera), status='AwaitingCommand')
                        return
                    except:
                        continue
                return
            else:
                self._modifyPiGS(error='Awaiting assignment of TankID')
                time.sleep(5)

    def _initError(self, message):
        """Report a fatal init problem to the spreadsheet and log, then raise TypeError."""
        try:
            self._modifyPiGS(command='None', status='Stopped', error='InitError: ' + message)
        except:
            pass
        self._print('InitError: ' + message)
        raise TypeError

    def _reinstructError(self, message):
        """Report a bad instruction and go back to waiting for commands (re-enters monitorCommands)."""
        self._modifyPiGS(command='None', status='AwaitingCommands', error='InstructError: ' + message)
        # Update google doc to indicate error
        self.monitorCommands()

    def _print(self, text):
        """Write to the run logfile (if open) and always to stderr."""
        try:
            print(text, file=self.lf, flush=True)
        except:
            pass
        print(text, file=sys.stderr, flush=True)

    def _returnRegColor(self, crop=True):
        """Return a registered color array from the active depth device, cropped to self.r by default."""
        # This function returns a registered color array
        if self.device == 'kinect':
            out = freenect.sync_get_video()[0]
        if self.device == 'realsense':
            frames = self.pipeline.wait_for_frames()
            color_frame = frames.get_color_frame()
            out = np.asanyarray(color_frame.get_data())
        if crop:
            # self.r is (x, y, width, height)
            return out[self.r[1]:self.r[1] + self.r[3], self.r[0]:self.r[0] + self.r[2]]
        else:
            return out

    def _returnDepth(self):
        """Return one cropped float64 depth frame with all bad data replaced by NaNs."""
        # This function returns a float64 npy array containing one frame of data with all bad data as NaNs
        if self.device == 'kinect':
            data = freenect.sync_get_depth()[0].astype('float64')
            data[data == 2047] = np.nan  # 2047 indicates bad data from Kinect
            return data[self.r[1]:self.r[1] + self.r[3], self.r[0]:self.r[0] + self.r[2]]
        if self.device == 'realsense':
            depth_frame = self.pipeline.wait_for_frames().get_depth_frame().as_depth_frame()
            data = np.asanyarray(depth_frame.data) * depth_frame.get_units()  # Convert to meters
            data[data == 0] = np.nan  # 0 indicates bad data from RealSense
            data[data > 1] = np.nan  # Anything further away than 1 m is a mistake
            return data[self.r[1]:self.r[1] + self.r[3], self.r[0]:self.r[0] + self.r[2]]

    def _returnCommand(self):
        """Read (command, projectID) for this Pi from the spreadsheet.

        Raises KeyError when authentication fails (callers treat it as 'retry later').
        """
        if not self._authenticateGoogleSpreadSheets():
            raise KeyError
        # link to google drive spreadsheet stored in self.controllerGS
        while True:
            try:
                pi_ws = self.controllerGS.worksheet('RaspberryPi')
                headers = pi_ws.row_values(1)
                piIndex = pi_ws.col_values(headers.index('RaspberryPiID') + 1).index(platform.node())
                command = pi_ws.col_values(headers.index('Command') + 1)[piIndex]
                projectID = pi_ws.col_values(headers.index('ProjectID') + 1)[piIndex]
                return command, projectID
            except gspread.exceptions.RequestError:
                continue

    def _modifyPiGS(self, start=None, command=None, status=None, IP=None, capability=None, error=None):
        """Update this Pi's row in the spreadsheet; any None argument is left untouched.

        Always refreshes the 'Ping' column with the current time. Google API
        errors are logged rather than raised.
        """
        while not self._authenticateGoogleSpreadSheets():  # link to google drive spreadsheet stored in self.controllerGS
            continue
        try:
            pi_ws = self.controllerGS.worksheet('RaspberryPi')
            headers = pi_ws.row_values(1)
            row = pi_ws.col_values(headers.index('RaspberryPiID') + 1).index(platform.node()) + 1
            if start is not None:
                column = headers.index('MasterStart') + 1
                pi_ws.update_cell(row, column, start)
            if command is not None:
                column = headers.index('Command') + 1
                pi_ws.update_cell(row, column, command)
            if status is not None:
                column = headers.index('Status') + 1
                pi_ws.update_cell(row, column, status)
            if error is not None:
                column = headers.index('Error') + 1
                pi_ws.update_cell(row, column, error)
            if IP is not None:
                column = headers.index('IP') + 1
                pi_ws.update_cell(row, column, IP)
            if capability is not None:
                column = headers.index('Capability') + 1
                pi_ws.update_cell(row, column, capability)
            column = headers.index('Ping') + 1
            pi_ws.update_cell(row, column, str(datetime.datetime.now()))
        except gspread.exceptions.RequestError as e:
            self._print('GoogleError: Time: ' + str(datetime.datetime.now()) + ',,Error: ' + str(e))
        except TypeError:
            self._print('GoogleError: Time: ' + str(datetime.datetime.now()) + ',,Error: Unknown. Gspread does not handle RequestErrors properly')

    def _video_recording(self):
        """Return True during daytime hours (08:00–18:59 local clock), when video should record."""
        if datetime.datetime.now().hour >= 8 and datetime.datetime.now().hour <= 18:
            return True
        else:
            return False

    def _start_kinect(self):
        """Initialize the active depth device; for RealSense also sets self.r from the stream size."""
        if self.device == 'kinect':
            freenect.sync_get_depth()  # Grabbing a frame initializes the device
            freenect.sync_get_video()
        elif self.device == 'realsense':
            # Create a context object. This object owns the handles to all connected realsense devices
            self.pipeline = rs.pipeline()
            # Configure streams
            config = rs.config()
            config.enable_stream(rs.stream.depth, rs.format.z16, 30)
            config.enable_stream(rs.stream.color, rs.format.rgb8, 30)
            # Start streaming
            self.pipeline.start(config)
            frames = self.pipeline.wait_for_frames()
            depth = frames.get_depth_frame()
            self.r = (0, 0, depth.width, depth.height)

    def _diagnose_speed(self, time=10):
        """Measure depth-capture rate for `time` seconds and save baseline frames/statistics.

        NOTE(review): the parameter shadows the module-level `time` import
        inside this method.
        """
        print('Diagnosing speed for ' + str(time) + ' seconds.', file=sys.stderr)
        delta = datetime.timedelta(seconds=time)
        start_t = datetime.datetime.now()
        counter = 0
        while True:
            depth = self._returnDepth()
            counter += 1
            if datetime.datetime.now() - start_t > delta:
                break
        # Grab single snapshot of depth and save it
        depth = self._returnDepth()
        np.save(self.projectDirectory + 'Frames/FirstFrame.npy', depth)
        # Grab a bunch of depth files to characterize the variability
        data = np.zeros(shape=(50, self.r[3], self.r[2]))
        for i in range(0, 50):
            data[i] = self._returnDepth()
        counts = np.count_nonzero(~np.isnan(data), axis=0)
        std = np.nanstd(data, axis=0)
        np.save(self.projectDirectory + 'Frames/FirstDataCount.npy', counts)
        np.save(self.projectDirectory + 'Frames/StdevCount.npy', std)
        self._print('DiagnoseSpeed: Rate: ' + str(counter / time))
        self._print('FirstFrameCaptured: FirstFrame: Frames/FirstFrame.npy,,GoodDataCount: Frames/FirstDataCount.npy,,StdevCount: Frames/StdevCount.npy')

    def _captureFrame(self, endtime, max_frames=40, stdev_threshold=.05, snapshots=False):
        """Capture a time-averaged depth frame (one sample every 10 s until endtime).

        Pixels with high temporal stdev or fewer than 3 valid samples are set
        to NaN. Saves the averaged .npy and a color .jpg; returns the averaged
        frame, or None if endtime has already passed.
        """
        # Captures time averaged frame of depth data
        # NOTE(review): sums/n/stds appear unused below — likely leftovers.
        sums = np.zeros(shape=(self.r[3], self.r[2]))
        n = np.zeros(shape=(self.r[3], self.r[2]))
        stds = np.zeros(shape=(self.r[3], self.r[2]))
        current_time = datetime.datetime.now()
        if current_time >= endtime:
            return
        counter = 1
        all_data = np.empty(shape=(int(max_frames), self.r[3], self.r[2]))
        all_data[:] = np.nan
        for i in range(0, max_frames):
            all_data[i] = self._returnDepth()
            current_time = datetime.datetime.now()
            if snapshots:
                self._print('SnapshotCaptured: NpyFile: Frames/Snapshot_' + str(counter).zfill(6) + '.npy,,Time: ' + str(current_time) + ',,GP: ' + str(np.count_nonzero(~np.isnan(all_data[i]))))
                np.save(self.projectDirectory + 'Frames/Snapshot_' + str(counter).zfill(6) + '.npy', all_data[i])
            counter += 1
            if current_time >= endtime:
                break
            time.sleep(10)
        med = np.nanmean(all_data, axis=0)
        std = np.nanstd(all_data, axis=0)
        med[np.isnan(std)] = np.nan
        med[std > stdev_threshold] = np.nan
        std[std > stdev_threshold] = np.nan
        counts = np.count_nonzero(~np.isnan(all_data), axis=0)
        med[counts < 3] = np.nan
        std[counts < 3] = np.nan
        color = self._returnRegColor()
        self._print('FrameCaptured: NpyFile: Frames/Frame_' + str(self.frameCounter).zfill(6) + '.npy,,PicFile: Frames/Frame_' + str(self.frameCounter).zfill(6) + '.jpg,,Time: ' + str(endtime) + ',,NFrames: ' + str(i) + ',,AvgMed: ' + '%.2f' % np.nanmean(med) + ',,AvgStd: ' + '%.2f' % np.nanmean(std) + ',,GP: ' + str(np.count_nonzero(~np.isnan(med))))
        np.save(self.projectDirectory + 'Frames/Frame_' + str(self.frameCounter).zfill(6) + '.npy', med)
        matplotlib.image.imsave(self.projectDirectory + 'Frames/Frame_' + str(self.frameCounter).zfill(6) + '.jpg', color)
        self.frameCounter += 1
        return med

    def _uploadFiles(self):
        """Finish video conversions, assemble the PrepFiles directory, and upload everything to the cloud."""
        self._modifyPiGS(status='Finishing converting and uploading of videos')
        for p in self.processes:
            p.communicate()
        for movieFile in os.listdir(self.videoDirectory):
            if '.h264' in movieFile:
                command = ['python3', 'unit_scripts/process_video.py', self.videoDirectory + movieFile]
                command += [str(self.camera.framerate[0]), self.projectID]
                self._print(command)
                self.processes.append(subprocess.Popen(command))
        for p in self.processes:
            p.communicate()
        self._modifyPiGS(status='Creating prep files')
        # Move files around as appropriate
        prepDirectory = self.projectDirectory + 'PrepFiles/'
        shutil.rmtree(prepDirectory) if os.path.exists(prepDirectory) else None
        os.makedirs(prepDirectory)
        lp = LP(self.loggerFile)
        self.frameCounter = lp.lastFrameCounter + 1
        # First daytime movie is used as the representative RGB picture
        videoObj = [x for x in lp.movies if x.startTime.hour >= 8 and x.startTime.hour <= 20][0]
        subprocess.call(['cp', self.projectDirectory + videoObj.pic_file, prepDirectory + 'PiCameraRGB.jpg'])
        subprocess.call(['cp', self.projectDirectory + lp.movies[-1].pic_file, prepDirectory + 'LastPiCameraRGB.jpg'])
        # Find depthfile that is closest to the video file time
        depthObj = [x for x in lp.frames if x.time > videoObj.startTime][0]
        subprocess.call(['cp', self.projectDirectory + depthObj.pic_file, prepDirectory + 'DepthRGB.jpg'])
        if not os.path.isdir(self.frameDirectory):
            self._modifyPiGS(status='Error: ' + self.frameDirectory + ' does not exist.')
            return
        subprocess.call(['cp', self.frameDirectory + 'Frame_000001.npy', prepDirectory + 'FirstDepth.npy'])
        subprocess.call(['cp', self.frameDirectory + 'Frame_' + str(self.frameCounter - 1).zfill(6) + '.npy', prepDirectory + 'LastDepth.npy'])
        try:
            self._modifyPiGS(status='Uploading data to cloud')
            self.fileManager.uploadData(self.frameDirectory, tarred=True)
            #print(prepDirectory)
            self.fileManager.uploadData(prepDirectory)
            #print(self.videoDirectory)
            self.fileManager.uploadData(self.videoDirectory)
            #print(self.loggerFile)
            self.fileManager.uploadData(self.loggerFile)
            self._modifyPiGS(error='UploadSuccessful, ready for delete')
        except Exception as e:
            print('UploadError: ' + str(e))
            self._modifyPiGS(error='UploadFailed, Need to rerun')
            raise Exception

    def _closeFiles(self):
        """Close the logfile and (on mac) kill the caffeinate process, ignoring missing attributes."""
        try:
            self._print('MasterRecordStop: ' + str(datetime.datetime.now()))
            self.lf.close()
        except AttributeError:
            pass
        try:
            if self.system == 'mac':
                self.caff.kill()
        except AttributeError:
            pass
class ProjectPreparer():
    """Takes in a projectID and runs all the appropriate analysis steps.

    Thin orchestration layer: each run* method constructs the matching
    analysis object (PrP, DP, CP, TDCP, ...) around self.fileManager,
    validates its input data, and runs it.
    """

    def __init__(self, projectID=None, modelID=None, workers=None, summaryFile=None):
        """Store run parameters, build the FileManager, and validate projectID.

        Raises:
            Exception: if projectID is given but not present in the cloud
                project listing (a None projectID always passes).
        """
        self.projectID = projectID
        self.fileManager = FM(projectID=projectID, modelID=modelID, summaryFile=summaryFile)
        self.modelID = modelID
        if not self._checkProjectID():
            raise Exception(projectID + ' is not valid.')
        self.workers = workers

    def _checkProjectID(self):
        """Return True if projectID is None or exists in the cloud __ProjectData listing."""
        if self.projectID is None:
            return True
        projectIDs = subprocess.run(['rclone', 'lsf', self.fileManager.cloudMasterDir + '__ProjectData/'],
                                    capture_output=True, encoding='utf-8').stdout.split()
        # rclone lsf lists directories with a trailing slash
        if self.projectID + '/' in projectIDs:
            return True
        else:
            return False

    def downloadData(self, dtype, videoIndex=None):
        """Download project data of the given dtype (delegates to the FileManager)."""
        self.fileManager.downloadProjectData(dtype, videoIndex)

    def uploadData(self, dtype, videoIndex=None, delete=False, no_upload=False):
        """Upload project data of the given dtype (delegates to the FileManager)."""
        print(no_upload)
        self.fileManager.uploadProjectData(dtype, videoIndex, delete, no_upload)

    def runPrepAnalysis(self):
        """Validate and run the prep-data analysis."""
        prp_obj = PrP(self.fileManager)
        prp_obj.validateInputData()
        prp_obj.prepData()

    def runDepthAnalysis(self):
        """Validate input, build the smoothed depth array, and render the RGB video."""
        dp_obj = DP(self.fileManager)
        dp_obj.validateInputData()
        dp_obj.createSmoothedArray()
        dp_obj.createRGBVideo()

    def runClusterAnalysis(self, videoIndex):
        """Run cluster analysis on one video, or on every video when videoIndex is None."""
        if videoIndex is None:
            videos = list(range(len(self.fileManager.lp.movies)))
        else:
            videos = [videoIndex]
        for videoIndex in videos:
            cp_obj = CP(self.fileManager, videoIndex, self.workers)
            cp_obj.validateInputData()
            cp_obj.runClusterAnalysis()

    def run3DClassification(self):
        """Predict cluster labels with the 3D classifier and write its summary file."""
        tdcp_obj = TDCP(self.fileManager)
        tdcp_obj.validateInputData()
        tdcp_obj.predictLabels()
        tdcp_obj.createSummaryFile()

    def manuallyLabelVideos(self, initials, number):
        """Launch the manual video-labeling workflow for `number` clips under `initials`."""
        mlv_obj = MLVP(self.fileManager, initials, number)
        mlv_obj.validateInputData()
        mlv_obj.labelVideos()

    def manuallyLabelFrames(self, initials, number):
        """Launch the manual frame-labeling workflow for `number` frames under `initials`."""
        mlf_obj = MLFP(self.fileManager, initials, number)
        mlf_obj.validateInputData()
        mlf_obj.labelFrames()

    def createModel(self, MLtype, projectIDs, gpu):
        """Train a model of the requested type; currently only '3DResnet' is supported."""
        if MLtype == '3DResnet':
            tdm_obj = TDMP(self.fileManager, projectIDs, self.modelID, gpu)
            tdm_obj.validateInputData()
            tdm_obj.create3DModel()

    def runMLFishDetection(self):
        """Not implemented yet."""
        pass

    def runSummaryCreation(self):
        """Create the full project summary."""
        sp_obj = SP(self.fileManager)
        sp_obj.createFullSummary()

    def backupAnalysis(self):
        """Replay queued UploadData_*.csv commands, upload the analysis log, and clear local data."""
        uploadCommands = set()
        uploadFiles = [x for x in os.listdir(self.fileManager.localUploadDir) if 'UploadData' in x]
        for uFile in uploadFiles:
            with open(self.fileManager.localUploadDir + uFile) as f:
                line = next(f)  # Skip the 'Local,Cloud,Tar' header
                for line in f:
                    tokens = line.rstrip().split(',')
                    # NOTE(review): createUploadFile writes str(bool) ('True'/'False'),
                    # which int() cannot parse — presumably callers write 0/1 here; confirm.
                    tokens[2] = bool(int(tokens[2]))
                    uploadCommands.add(tuple(tokens))
        for command in uploadCommands:
            self.fileManager.uploadData(command[0], command[1], command[2])
        for uFile in uploadFiles:
            subprocess.run(['rm', '-rf', self.fileManager.localUploadDir + uFile])
        self.fileManager.uploadData(self.fileManager.localAnalysisLogDir,
                                    self.fileManager.cloudAnalysisLogDir, False)
        # BUGFIX: was self.projFileManager.localMasterDir — projFileManager is never
        # assigned on this class (only self.fileManager), so this line always raised
        # AttributeError before the local data could be removed.
        subprocess.run(['rm', '-rf', self.fileManager.localMasterDir])

    def localDelete(self):
        """Remove the local project directory."""
        subprocess.run(['rm', '-rf', self.fileManager.localProjectDir])

    def createUploadFile(self, uploads):
        """Write a timestamped UploadData_*.csv of (local, cloud, tar) triples for later replay."""
        with open(self.fileManager.localUploadDir + 'UploadData_' + str(datetime.datetime.now().timestamp()) + '.csv', 'w') as f:
            print('Local,Cloud,Tar', file=f)
            for upload in uploads:
                print(upload[0] + ',' + upload[1] + ',' + str(upload[2]), file=f)

    def createAnalysisUpdate(self, aType, procObj):
        """Record a timestamped AnalysisUpdate_*.csv noting which analysis version ran."""
        now = datetime.datetime.now()
        with open(self.fileManager.localAnalysisLogDir + 'AnalysisUpdate_' + str(now.timestamp()) + '.csv', 'w') as f:
            print('ProjectID,Type,Version,Date', file=f)
            print(self.projectID + ',' + aType + ',' + procObj.__version__ + '_' + os.getenv('USER') + ',' + str(now), file=f)
f = open(fileManager.localProjectDir + 'VideoProcessLog.txt', 'a') if indent: print(' ' + str(datetime.datetime.now()) + ': ' + str(message), file = f) else: print(str(datetime.datetime.now()) + ': ' + str(message), file = f) f.close() parser = argparse.ArgumentParser() parser.add_argument('VideoFile', type = str, help = 'Name of h264 file to be processed') parser.add_argument('Framerate', type = float, help = 'Video framerate') parser.add_argument('ProjectID', type = str, help = 'Video framerate') args = parser.parse_args() fileManager = FM(projectID = args.ProjectID) if '.h264' not in args.VideoFile: logPrinter(args.VideoFile + ' not an h264 file', indent = False) raise Exception(args.VideoFile + ' not an h264 file') # Convert h264 to mp4 if os.path.exists(args.VideoFile.replace('.h264', '.mp4')): logPrinter(args.VideoFile.replace('.h264', '.mp4') + ' already exits. Deleting') subprocess.run(['rm', '-f', args.VideoFile.replace('.h264', '.mp4')]) command = ['ffmpeg', '-r', str(args.Framerate), '-i', args.VideoFile, '-threads', '1', '-c:v', 'copy', '-r', str(args.Framerate), args.VideoFile.replace('.h264', '.mp4')] logPrinter('Beginning conversion of video: ' + args.VideoFile.split('/')[-1], indent = False) logPrinter(command) ffmpeg_output = subprocess.run(command, capture_output = True) try:
import argparse, subprocess from cichlid_bower_tracking.helper_modules.object_labeler import AnnotationDisagreements as AD from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM parser = argparse.ArgumentParser() parser.add_argument('User1', type = str, help = 'Initials of user annotations to compare') parser.add_argument('User2', type = str, help = 'Initials user annotations to compare') parser.add_argument('ProjectID', type = str, help = 'Project to analyze') parser.add_argument('-p', '--Practice', action = 'store_true', help = 'Use if you dont want to save annotations') args = parser.parse_args() fm_obj = FM(projectID = args.ProjectID) fm_obj.downloadData(fm_obj.localLabeledClipsProjectDir, tarred = True) fm_obj.downloadData(fm_obj.localBoxedFishFile) obj = AD(self.fileManager.localLabeledFramesProjectDir, self.fileManager.localBoxedFishFile, args.ProjectID, args.User1, args.User2) ad_obj = AD(fm_obj.localBoxedFishDir + args.ProjectID + '/', temp_dt, args.ProjectID, args.User1, args.User2, args.All) # Redownload csv in case new annotations have been added fm_obj.downloadData(fm_obj.localBoxedFishFile) old_dt = pd.read_csv(fm_obj.localBoxedFishFile, index_col = 0) new_dt = pd.read_csv(temp_dt) old_dt = old_dt.append(new_dt, sort = 'False').drop_duplicates(subset = ['ProjectID', 'Framefile', 'User', 'Sex', 'Box'], keep = 'last').sort_values(by = ['ProjectID', 'Framefile']) old_dt.to_csv(fm_obj.localBoxedFishFile, sep = ',', columns = ['ProjectID', 'Framefile', 'Nfish', 'Sex', 'Box', 'CorrectAnnotation','User', 'DateTime']) if not args.Practice:
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM

# Command-line interface: optionally restrict the run to the projectIDs listed
# in a user-supplied summary csv file.
parser = argparse.ArgumentParser(
    description='This script is used to determine analysis states for each project.')
parser.add_argument(
    '--SummaryFile',
    type=str,
    help='Restrict analysis to projectIDs specified in csv file, which will be rewritten. ProjectIDs must be found in a column called projectID')
args = parser.parse_args()

fm_obj = FM(summaryFile=args.SummaryFile)

if args.SummaryFile is None:
    # No summary file supplied: survey every known project and start a fresh table.
    fm_obj.createDirectory(fm_obj.localAnalysisStatesDir)
    summary_file = fm_obj.localAnalysisStatesDir + 'AllProjects.csv'
    projectIDs = fm_obj.getAllProjectIDs()
    dt = pd.DataFrame(columns=[
        'projectID', 'tankID', 'StartingFiles', 'Prep', 'Depth', 'Cluster',
        'ClusterClassification', 'LabeledVideos', 'LabeledFrames', 'Summary'
    ])
else:
    # Summary file supplied: download it and analyze only the projects it lists.
    summary_file = fm_obj.localSummaryFile
    fm_obj.downloadData(summary_file)
    dt = pd.read_csv(summary_file, index_col=False)
    projectIDs = list(dt.projectID)
import subprocess, gspread, pdb
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM
import pandas as pd

# Requires ttab https://www.npmjs.com/package/ttab#manual-installation

# Pull credentials and read the list of Raspberry Pis registered in the master
# 'Controller' spreadsheet.
fileManager = FM()
fileManager.downloadData(fileManager.localCredentialDir)
gs = gspread.service_account(filename=fileManager.localCredentialSpreadsheet)
controllerGS = gs.open('Controller')
pi_ws = controllerGS.worksheet('RaspberryPi')
data = pi_ws.get_all_values()
dt = pd.DataFrame(data[1:], columns=data[0])

# First pass: just report which Pis were found.
for pi_id in dt.RaspberryPiID:
    print(pi_id)
    # One-time ssh key setup, kept here for reference:
    #subprocess.run(['ssh-keygen', '-t', 'rsa', '-f', '~/.ssh/id_rsa'])
    #subprocess.run(['ssh-copy-id', 'pi@' + pi_id + '.biosci.gatech.edu'])

# Second pass: open one terminal tab per Pi with an ssh session into it.
for pi_id in dt.RaspberryPiID:
    subprocess.run(['ttab', '-t', pi_id, 'ssh', f'pi@{pi_id}.biosci.gatech.edu'])
help='ModelID to use to classify clusters with') parser.add_argument( '--Force', type=bool, default=False, help= 'if True, run the analysis even if the summary file indicates is has already been run. Default False' ) args = parser.parse_args() # Identify projects to run analysis on if args.ProjectIDs is not None: projectIDs = args.ProjectIDs # Specified at the command line else: fm_obj = FM(summaryFile=args.SummaryFile) fm_obj.downloadData(fm_obj.localSummaryFile) dt = pd.read_csv(fm_obj.localSummaryFile) if args.Force: projectIDs = list(dt.projectID) else: projectIDs = list( dt[dt[args.AnalysisType] == False].projectID) # Only run analysis on projects that need it if args.Workers is None: workers = os.cpu_count() else: workers = args.Workers
class CichlidTracker:
    """Run data collection (depth + PiCamera video) on a Raspberry Pi.

    The tracker polls a master 'Controller' Google Spreadsheet for commands
    ('New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete'),
    executes them, and reports status/errors back to the same sheet.

    NOTE: the constructor does not return under normal operation -- it ends by
    calling monitorCommands(), which loops forever.
    """

    def __init__(self, all_data):
        # Flag to keep all raw depth data (forwarded to _captureFrame)
        self.all_data = all_data

        # 1: Define valid commands and ignore numpy warnings (NaN-heavy math below)
        self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete']
        np.seterr(invalid='ignore')

        # 2: Determine which depth sensor is attached (This script can handle DepthSense cameras)
        self._identifyDevice()  # Stored in self.device
        self.system = platform.node()

        # 3: Create file manager
        self.fileManager = FM()

        # 4: Start PiCamera
        self.camera = PiCamera()
        self.camera.resolution = (1296, 972)
        self.camera.framerate = 30
        # NOTE(review): a string, not a bool -- downstream code only tests truthiness
        # and logs it, so this is kept as-is for log compatibility.
        self.piCamera = 'True'

        # 5: Download credential files and authenticate with Google
        self.fileManager.downloadData(self.fileManager.localCredentialDir)
        self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable
        self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
        self._identifyTank()  # Stored in self.tankID
        self._identifyServiceAccount()

        # 6: Keep track of processes spawned to convert and upload videofiles
        self.processes = []

        # 7: Set size of frame (a depth device may already have set self.r)
        try:
            self.r
        except AttributeError:
            self.r = (0, 0, 640, 480)

        # 8: Await instructions (blocks forever)
        print('Monitoring commands')
        self.monitorCommands()

    def __del__(self):
        # Best-effort teardown: mark the Pi stopped on the spreadsheet, stop any
        # recording, stop the RealSense pipeline, and close log files.
        self._modifyPiGS('Command', 'None', ping=False)
        self._modifyPiGS('Status', 'Stopped', ping=False)
        self._modifyPiGS('Error', 'UnknownError', ping=False)
        if self.piCamera:
            if self.camera.recording:
                self.camera.stop_recording()
                self._print('PiCameraStopped: Time=' + str(datetime.datetime.now()) + ', File=Videos/' + str(self.videoCounter).zfill(4) + "_vid.h264")
        if self.device == 'realsense':
            self.pipeline.stop()
        self._closeFiles()

    def monitorCommands(self, delta=20):
        """Poll the Controller spreadsheet every `delta` seconds for a command.

        A command is only executed when both a command and a ProjectID are set.
        """
        self._modifyPiGS('Status', 'AwaitingCommand')
        self._modifyPiGS('Error', '', ping=False)
        while True:
            command, projectID = self._returnCommand()
            if projectID in ['', 'None']:
                self._reinstructError('ProjectID must be set')
                time.sleep(delta)
                continue
            if command not in ['None', None]:
                print(command + '\t' + projectID)
                self.fileManager.createProjectData(projectID)
                self.runCommand(command, projectID)
            time.sleep(delta)

    def runCommand(self, command, projectID):
        """Execute one command from the Controller spreadsheet for `projectID`."""
        self.projectID = projectID

        # Rename files to make code more readable
        self.projectDirectory = self.fileManager.localProjectDir
        self.loggerFile = self.fileManager.localLogfile
        self.googleErrorFile = self.fileManager.localProjectDir + 'GoogleErrors.txt'
        self.frameDirectory = self.fileManager.localFrameDir
        self.videoDirectory = self.fileManager.localVideoDir
        self.backupDirectory = self.fileManager.localBackupDir

        if command not in self.commands:
            self._reinstructError(command + ' is not a valid command. Options are ' + str(self.commands))

        if command == 'Stop':
            if self.piCamera:
                if self.camera.recording:
                    self.camera.stop_recording()
                    self._print('PiCameraStopped: Time: ' + str(datetime.datetime.now()) + ',,File: Videos/' + str(self.videoCounter).zfill(4) + "_vid.h264")
                    # Kick off conversion/upload of the final video file
                    command = ['python3', 'unit_scripts/process_video.py', self.videoDirectory + str(self.videoCounter).zfill(4) + '_vid.h264']
                    command += [str(self.camera.framerate[0]), self.projectID]
                    self._print(command)
                    self.processes.append(subprocess.Popen(command))
            try:
                if self.device == 'realsense':
                    self.pipeline.stop()
            except Exception as e:
                self._googlePrint(e)
                self._print('ErrorStopping kinect')
            self._closeFiles()
            self._modifyPiGS('Command', 'None', ping=False)
            self._modifyPiGS('Status', 'AwaitingCommand', ping=False)
            return

        if command == 'UploadData':
            self._modifyPiGS('Command', 'None')
            self._uploadFiles()
            return

        if command == 'LocalDelete':
            if os.path.exists(self.projectDirectory):
                shutil.rmtree(self.projectDirectory)
            self._modifyPiGS('Command', 'None', ping=False)
            self._modifyPiGS('Status', 'AwaitingCommand', ping=False)
            return

        # Remaining commands (New/Rewrite/Restart) start a recording session
        self._modifyPiGS('Command', 'None', ping=False)
        self._modifyPiGS('Status', 'Running', ping=False)
        self._modifyPiGS('Error', '', ping=False)

        if command == 'New':
            # Project Directory should not exist. If it does, report error
            if os.path.exists(self.projectDirectory):
                self._reinstructError('New command cannot be run if ouput directory already exists. Use Rewrite or Restart')

        if command == 'Rewrite':
            if os.path.exists(self.projectDirectory):
                shutil.rmtree(self.projectDirectory)

        if command in ['New', 'Rewrite']:
            self.masterStart = datetime.datetime.now()
            os.makedirs(self.projectDirectory)
            os.makedirs(self.frameDirectory)
            os.makedirs(self.videoDirectory)
            os.makedirs(self.backupDirectory)
            #self._createDropboxFolders()
            self.frameCounter = 1
            self.videoCounter = 1

        if command == 'Restart':
            # Recover counters and start time from the existing log file
            logObj = LP(self.loggerFile)
            self.masterStart = logObj.master_start
            self.frameCounter = logObj.lastFrameCounter + 1
            self.videoCounter = logObj.lastVideoCounter + 1
            if self.system != logObj.system or self.device != logObj.device:
                self._reinstructError('Restart error. LogData: ' + ','.join([str(x) for x in [logObj.system, logObj.device, logObj.camera]]) + ',, SystemData: ' + ','.join([str(x) for x in [self.system, self.device, self.camera]]))
                return
            if self.device != 'None':
                row, column, ping_column = self._getRowColumn('Image')
                subprocess.Popen(['python3', 'unit_scripts/drive_updater.py', self.loggerFile, str(row), str(column)])

        self.lf = open(self.loggerFile, 'a', buffering=1)  # line buffered
        self.g_lf = open(self.googleErrorFile, 'a', buffering=1)
        self._modifyPiGS('MasterStart', str(self.masterStart), ping=False)

        if command in ['New', 'Rewrite']:
            self._print('MasterStart: System: ' + self.system + ',,Device: ' + self.device + ',,Camera: ' + str(self.piCamera) + ',,Uname: ' + str(platform.uname()) + ',,TankID: ' + self.tankID + ',,ProjectID: ' + self.projectID)
            self._print('MasterRecordInitialStart: Time: ' + str(self.masterStart))
            self._print('PrepFiles: FirstDepth: PrepFiles/FirstDepth.npy,,LastDepth: PrepFiles/LastDepth.npy,,PiCameraRGB: PiCameraRGB.jpg,,DepthRGB: DepthRGB.jpg')
            # Log every camera setting so the run can be reproduced later
            picamera_settings = {
                'AnalogGain': str(self.camera.analog_gain),
                'AWB_Gains': str(self.camera.awb_gains),
                'AWB_Mode': str(self.camera.awb_mode),
                'Brightness': str(self.camera.brightness),
                'ClockMode': str(self.camera.clock_mode),
                'Contrast': str(self.camera.contrast),
                'Crop': str(self.camera.crop),
                'DigitalGain': str(self.camera.digital_gain),
                'ExposureCompensation': str(self.camera.exposure_compensation),
                'ExposureMode': str(self.camera.exposure_mode),
                'ExposureSpeed': str(self.camera.exposure_speed),
                'FrameRate': str(self.camera.framerate),
                'ImageDenoise': str(self.camera.image_denoise),
                'MeterMode': str(self.camera.meter_mode),
                'RawFormat': str(self.camera.raw_format),
                'Resolution': str(self.camera.resolution),
                'Saturation': str(self.camera.saturation),
                'SensorMode': str(self.camera.sensor_mode),
                'Sharpness': str(self.camera.sharpness),
                'ShutterSpeed': str(self.camera.shutter_speed),
                'VideoDenoise': str(self.camera.video_denoise),
                'VideoStabilization': str(self.camera.video_stabilization),
            }
            self._print('PiCameraSettings: ' + ',,'.join([x + ': ' + picamera_settings[x] for x in sorted(picamera_settings.keys())]))
            #self._createROI(useROI = False)
        else:
            self._print('MasterRecordRestart: Time: ' + str(datetime.datetime.now()))

        # Start kinect
        if self.device != 'None':
            self._start_kinect()

        # Diagnose speed
        self._diagnose_speed()

        # Capture data
        self.captureFrames()

    def captureFrames(self, frame_delta=5, background_delta=5):
        """Main capture loop: manage video recording and periodic depth frames.

        frame_delta / background_delta are in minutes. Loops until a new command
        appears on the Controller spreadsheet.
        """
        current_background_time = datetime.datetime.now()
        current_frame_time = current_background_time + datetime.timedelta(seconds=60 * frame_delta)
        command = ''
        while True:
            # Grab new time
            now = datetime.datetime.now()

            # Fix camera if it needs to be (start/stop recording per schedule)
            if self.piCamera:
                if self._video_recording() and not self.camera.recording:
                    self.camera.capture(self.videoDirectory + str(self.videoCounter).zfill(4) + "_pic.jpg")
                    self._print('PiCameraStarted: FrameRate: ' + str(self.camera.framerate) + ',,Resolution: ' + str(self.camera.resolution) + ',,Time: ' + str(datetime.datetime.now()) + ',,VideoFile: Videos/' + str(self.videoCounter).zfill(4) + '_vid.h264,,PicFile: Videos/' + str(self.videoCounter).zfill(4) + '_pic.jpg')
                    self.camera.start_recording(self.videoDirectory + str(self.videoCounter).zfill(4) + "_vid.h264", bitrate=7500000)
                elif not self._video_recording() and self.camera.recording:
                    self._print('PiCameraStopped: Time: ' + str(datetime.datetime.now()) + ',, File: Videos/' + str(self.videoCounter).zfill(4) + "_vid.h264")
                    self.camera.stop_recording()
                    #self._print(['rclone', 'copy', self.videoDirectory + str(self.videoCounter).zfill(4) + "_vid.h264"])
                    command = ['python3', 'unit_scripts/process_video.py', self.videoDirectory + str(self.videoCounter).zfill(4) + '_vid.h264']
                    command += [str(self.camera.framerate[0]), self.projectID]
                    self._print(command)
                    self.processes.append(subprocess.Popen(command))
                    self.videoCounter += 1

            # Capture a frame and background if necessary
            if self.device != 'None':
                if now > current_background_time:
                    out = self._captureFrame(current_frame_time, keep_all_data=self.all_data)
                    if out is not None:
                        current_background_time += datetime.timedelta(seconds=60 * background_delta)
                        row, column, ping_column = self._getRowColumn('Image')
                        subprocess.Popen(['python3', 'unit_scripts/drive_updater.py', self.loggerFile, str(row), str(column)])
                else:
                    # BUGFIX: original passed `stdev_threshold = stdev_threshold`, a
                    # name not defined in this scope (NameError). Use the default.
                    out = self._captureFrame(current_frame_time, keep_all_data=self.all_data)
            else:
                # No depth device: just wait out the frame interval
                while datetime.datetime.now() < current_frame_time:
                    time.sleep(5)
            current_frame_time += datetime.timedelta(seconds=60 * frame_delta)

            # Check google doc to determine if recording has changed.
            try:
                command, projectID = self._returnCommand()
            except KeyError:
                continue
            if command != 'None' and command is not None:
                break
            else:
                self._modifyPiGS('Error', '')

    def _authenticateGoogleSpreadSheets(self):
        """Authenticate with Google and open the Controller worksheet.

        Registers this Pi on the sheet if it is not already listed.
        Returns True on success, False after three failed attempts.
        """
        # Get IP address
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect(("8.8.8.8", 80))
        self.IP = s.getsockname()[0]
        s.close()
        for i in range(0, 3):  # Try to authenticate three times before failing
            try:
                gs = gspread.service_account(filename=self.credentialSpreadsheet)
            except Exception as e:
                self._googlePrint(e)
                continue
            try:
                self.controllerGS = gs.open('Controller')
                self.pi_ws = self.controllerGS.worksheet('RaspberryPi')
                data = self.pi_ws.get_all_values()
                dt = pd.DataFrame(data[1:], columns=data[0])
            except Exception as e:
                self._googlePrint(e)
                continue
            try:
                if len(dt.loc[dt.RaspberryPiID == platform.node()]) == 0:
                    # New Pi: register it with a placeholder row
                    self.pi_ws.append_row([platform.node(), self.IP, '', '', '', '', '', 'None', 'Stopped', 'Error: Awaiting assignment of TankID', str(datetime.datetime.now())])
                    return True
                else:
                    return True
            except Exception as e:
                self._googlePrint(e)
                continue
        return False

    def _identifyDevice(self):
        """Detect an attached RealSense depth camera; sets self.device."""
        try:
            global rs
            import pyrealsense2 as rs
            ctx = rs.context()
            if len(ctx.devices) == 0:
                self.device = 'None'
            elif len(ctx.devices) > 1:
                self._initError('Multiple RealSense devices attached. Unsure how to handle')
            else:
                self.device = 'realsense'
        except Exception:
            # pyrealsense2 missing or device query failed: run without depth
            self.device = 'None'

    def _identifyTank(self):
        """Block until a TankID is assigned on the Controller sheet."""
        while True:
            tankID = self._getPiGS('TankID')
            if tankID not in ['None', '']:
                self.tankID = tankID
                self._modifyPiGS('Capability', 'Device=' + self.device + ',Camera=' + str(self.piCamera), ping=False)
                #self._modifyPiGS('Status', 'AwaitingCommand')
                break
            else:
                self._modifyPiGS('Error', 'Awaiting assignment of TankID')
                time.sleep(20)

    def _identifyServiceAccount(self):
        """Block until a ServiceAccount is assigned, then re-authenticate with it."""
        while True:
            serviceAccount = self._getPiGS('ServiceAccount')
            if serviceAccount not in ['None', '']:
                self.serviceAccount = serviceAccount
                self.credentialSpreadsheet = self.credentialSpreadsheet.replace('_1.json', '_' + self.serviceAccount + '.json')
                self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
                break
            else:
                self._modifyPiGS('Error', 'Awaiting assignment of ServiceAccount')
                time.sleep(20)

    def _initError(self, message):
        """Report a fatal initialization error to the sheet and raise."""
        try:
            self._modifyPiGS('Command', 'None')
            self._modifyPiGS('Status', 'Stopped', ping=False)
            self._modifyPiGS('Error', 'InitError: ' + message, ping=False)
        except Exception as e:
            self._googlePrint(e)
        self._print('InitError: ' + message)
        raise TypeError

    def _reinstructError(self, message):
        """Report a bad instruction and return to command monitoring."""
        try:
            #self._modifyPiGS('Command', 'None')
            #self._modifyPiGS('Status', 'AwaitingCommands', ping = False)
            self._modifyPiGS('Error', 'InstructError: ' + message, ping=False)
            self._print(message)
        except Exception as e:
            self._googlePrint(e)
        # Update google doc to indicate error
        self.monitorCommands()

    def _print(self, text):
        """Write to the project log file (if open) and to stderr."""
        try:
            print(str(text), file=self.lf, flush=True)
        except Exception:
            # self.lf may not exist yet -- best effort only
            pass
        print(str(text), file=sys.stderr, flush=True)

    def _googlePrint(self, e):
        """Log a Google/network exception and back off for 20 seconds."""
        try:
            print(str(datetime.datetime.now()) + ': ' + str(type(e)) + ': ' + str(e), file=self.g_lf, flush=True)
            time.sleep(20)
        except AttributeError:
            # log file not created yet so just print to stderr
            print(str(datetime.datetime.now()) + ': ' + str(type(e)) + ': ' + str(e), flush=True)
            time.sleep(20)

    def _returnRegColor(self, crop=True):
        # This function returns a registered color array
        if self.device == 'kinect':
            out = freenect.sync_get_video()[0]
        if self.device == 'realsense':
            frames = self.pipeline.wait_for_frames(1000)
            color_frame = frames.get_color_frame()
            out = np.asanyarray(color_frame.get_data())
        if crop:
            return out[self.r[1]:self.r[1] + self.r[3], self.r[0]:self.r[0] + self.r[2]]
        else:
            return out

    def _returnDepth(self):
        # This function returns a float64 npy array containing one frame of data
        # with all bad data as NaNs
        if self.device == 'realsense':
            frames = self.pipeline.wait_for_frames(1000)
            frames = self.align.process(frames)
            depth_frame = frames.get_depth_frame().as_depth_frame()
        data = np.asanyarray(depth_frame.data) * depth_frame.get_units()  # Convert to meters
        data[data == 0] = np.nan  # 0 indicates bad data from RealSense
        data[data > 1] = np.nan  # Anything further away than 1 m is a mistake
        return data[self.r[1]:self.r[1] + self.r[3], self.r[0]:self.r[0] + self.r[2]]

    def _returnCommand(self):
        """Return (command, projectID) from the Controller sheet."""
        command, projectID = self._getPiGS(['Command', 'ProjectID'])
        return command, projectID

    def _getPiGS(self, column_names):
        """Read this Pi's row from the Controller sheet.

        `column_names` may be a string (returns a scalar) or a list of strings
        (returns a list in the same order). Retries up to three times on
        transient Google/network errors; on total failure returns None (single
        column) or a list of Nones.
        """
        # Make this compatible with both lists and also strings
        if not isinstance(column_names, list):
            column_names = [column_names]
        for i in range(3):
            try:
                data = self.pi_ws.get_all_values()
            except gspread.exceptions.APIError as e:
                if e.response.status_code == 429:  # Read requests per minute exceeded
                    self._googlePrint('Read requests per minute exceeded')
                    continue
                elif e.response.status_code == 500:
                    self._googlePrint('Internal error encountered')
                    continue
                elif e.response.status_code == 404:
                    self._googlePrint('Requested entity was not found')
                    continue
                else:
                    self._googlePrint('gspread error of unknown nature: ' + str(e))
                    continue
            except requests.exceptions.ReadTimeout:
                self._googlePrint('Requests read timeout error encountered')
                continue
            # BUGFIX: the specific ConnectionError handler must precede the
            # generic Exception handler (it was unreachable), and the generic
            # handler must `continue` -- the original fell through and used the
            # unbound name `data`, raising NameError.
            except requests.exceptions.ConnectionError:
                self._googlePrint('Requests connection error encountered')
                continue
            except Exception as e:
                self._googlePrint(f'uncaught exception in _getPiGS: {str(e)}')
                continue
            dt = pd.DataFrame(data[1:], columns=data[0])
            self.dt = dt
            out_data = []
            for column_name in column_names:
                if column_name not in dt.columns:
                    self._googlePrint('Cant find column name in Controller: ' + column_name)
                    raise Exception
                try:
                    cell = dt.loc[(dt.RaspberryPiID == platform.node()) & (dt.IP == self.IP), column_name]
                except AttributeError:
                    pdb.set_trace()
                if len(cell) > 1:
                    self._googlePrint('Multiple rows in the Controller with the same ID and IP. Using 1st')
                    self._modifyPiGS('Error', 'InstructError: Multiple rows with the same IP/ID', ping=False)
                out_data.append(cell.values[0])
            if len(out_data) == 1:
                return out_data[0]
            else:
                return out_data
        self._googlePrint('Failed contancting controller for three tries')
        # BUGFIX: match the success path's shape -- scalar for a single column
        # (the original returned [None], which _identifyTank stored verbatim).
        if len(column_names) == 1:
            return None
        return [None] * len(column_names)

    def _getRowColumn(self, column_name):
        """Translate this Pi's row and `column_name` into gspread coordinates.

        Returns (row, column, ping_column), 1-indexed for gspread; the +2/+1
        offsets account for pandas 0-indexing and the header row.
        """
        column = self.dt.columns.get_loc(column_name)
        ping_column = self.dt.columns.get_loc('Ping')
        row = pd.Index((self.dt.RaspberryPiID == platform.node()) & (self.dt.IP == self.IP)).get_loc(True)
        return (row + 2, column + 1, ping_column + 1)  # 0 vs 1 indexing for pandas vs gspread + column names aren't in the pandas dataframe

    def _modifyPiGS(self, column_name, new_value, ping=True):
        """Write `new_value` into this Pi's `column_name` cell (retrying thrice).

        When `ping` is True, also stamp the Ping column with the current time.
        """
        for i in range(3):
            try:
                row, column, ping_column = self._getRowColumn(column_name)
                self.pi_ws.update_cell(row, column, new_value)
                if ping:
                    self.pi_ws.update_cell(row, ping_column, str(datetime.datetime.now()))
                break
            except gspread.exceptions.APIError as e:
                if e.response.status_code == 429:  # Read requests per minute exceeded
                    self._googlePrint('Read requests per minute exceeded')
                    continue
                elif e.response.status_code == 500:
                    self._googlePrint('Internal error encountered')
                    continue
                elif e.response.status_code == 404:
                    self._googlePrint('Requested entity was not found')
                    continue
                else:
                    self._googlePrint('gspread error of unknown nature: ' + str(e))
                    continue
            except requests.exceptions.ReadTimeout:
                self._googlePrint('Read timeout error')
                continue

    def _video_recording(self):
        """Return True during recording hours (08:00-18:59 local time)."""
        if datetime.datetime.now().hour >= 8 and datetime.datetime.now().hour <= 18:
            return True
        else:
            return False

    def _start_kinect(self):
        """Start the RealSense pipeline and set self.r from the depth frame size."""
        if self.device == 'realsense':
            # Create a context object. This object owns the handles to all
            # connected realsense devices
            self.pipeline = rs.pipeline()
            self.align = rs.align(rs.stream.color)
            # Configure streams
            config = rs.config()
            config.enable_stream(rs.stream.depth, 640, 480, rs.format.z16, 30)
            config.enable_stream(rs.stream.color, 640, 480, rs.format.rgb8, 30)
            # Start streaming
            self.profile = self.pipeline.start(config)
            frames = self.pipeline.wait_for_frames(1000)
            depth = frames.get_depth_frame()
            self.r = (0, 0, depth.width, depth.height)

    def _diagnose_speed(self, time=10):
        """Measure depth capture rate for `time` seconds and save baseline frames.

        NOTE: the parameter shadows the `time` module inside this method; the
        module is not used here, but keep that in mind when editing.
        """
        print('Diagnosing speed for ' + str(time) + ' seconds.', file=sys.stderr)
        delta = datetime.timedelta(seconds=time)
        start_t = datetime.datetime.now()
        counter = 0
        while True:
            depth = self._returnDepth()
            counter += 1
            if datetime.datetime.now() - start_t > delta:
                break
        # Grab single snapshot of depth and save it
        depth = self._returnDepth()
        np.save(self.projectDirectory + 'Frames/FirstFrame.npy', depth)
        # Grab a bunch of depth files to characterize the variability
        data = np.zeros(shape=(50, self.r[3], self.r[2]))
        for i in range(0, 50):
            data[i] = self._returnDepth()
        counts = np.count_nonzero(~np.isnan(data), axis=0)
        std = np.nanstd(data, axis=0)
        np.save(self.projectDirectory + 'Frames/FirstDataCount.npy', counts)
        np.save(self.projectDirectory + 'Frames/StdevCount.npy', std)
        self._print('DiagnoseSpeed: Rate: ' + str(counter / time))
        self._print('FirstFrameCaptured: FirstFrame: Frames/FirstFrame.npy,,GoodDataCount: Frames/FirstDataCount.npy,,StdevCount: Frames/StdevCount.npy')

    def _captureFrame(self, endtime, max_frames=40, stdev_threshold=.05, count_threshold=10, keep_all_data=False):
        """Capture a time-averaged depth frame until `endtime`.

        Filters pixels whose per-pixel stdev exceeds `stdev_threshold` or whose
        valid-sample count is below `count_threshold`. Returns the median frame,
        or None if `endtime` has already passed.
        """
        current_time = datetime.datetime.now()
        if current_time >= endtime:
            self._print('Frame without data')
            return
        all_data = np.empty(shape=(int(max_frames), self.r[3], self.r[2]))
        all_data[:] = np.nan
        for i in range(0, max_frames):
            all_data[i] = self._returnDepth()
            current_time = datetime.datetime.now()
            if current_time >= endtime:
                break
            time.sleep(10)
        # Save the raw stack once an hour (endtime in the first 5 minutes) or on request
        if (endtime.minute > 0 and endtime.minute <= 5) or keep_all_data:
            self._print('AllDataCaptured: NpyFile: Frames/AllData_' + str(self.frameCounter).zfill(6) + '.npy,,PicFile: Frames/Frame_' + str(self.frameCounter).zfill(6) + '.jpg,,Time: ' + str(endtime) + ',,NFrames: ' + str(i))
            np.save(self.projectDirectory + 'Frames/AllData_' + str(self.frameCounter).zfill(6) + '.npy', all_data)
        bad_all_pixels = np.count_nonzero(np.isnan(all_data))
        good_all_pixels = np.count_nonzero(~np.isnan(all_data))
        med = np.nanmedian(all_data, axis=0)
        std = np.nanstd(all_data, axis=0)
        med[np.isnan(std)] = np.nan
        bad_std_avg_pixels = (std > stdev_threshold).sum()
        med[std > stdev_threshold] = np.nan
        std[std > stdev_threshold] = np.nan
        counts = np.count_nonzero(~np.isnan(all_data), axis=0)
        bad_count_avg_pixels = (counts < count_threshold).sum()
        med[counts < count_threshold] = np.nan
        std[counts < count_threshold] = np.nan
        color = self._returnRegColor()
        outstring = 'FrameCaptured: NpyFile: Frames/Frame_' + str(self.frameCounter).zfill(6) + '.npy,,PicFile: Frames/Frame_' + str(self.frameCounter).zfill(6) + '.jpg,,'
        outstring += 'Time: ' + str(endtime) + ',,NFrames: ' + str(i) + ',,AvgMed: ' + '%.2f' % np.nanmean(med) + ',,AvgStd: ' + '%.2f' % np.nanmean(std) + ',,'
        outstring += 'GP: ' + str(np.count_nonzero(~np.isnan(med))) + ',,AllPixelsBad: ' + str(bad_all_pixels) + ',,AllPixelsGood: ' + str(good_all_pixels) + ',,'
        outstring += 'FilteredStdPixels: ' + str(bad_std_avg_pixels) + ',,FilteredCountPixels: ' + str(bad_count_avg_pixels) + ',,LOF: ' + str(self._video_recording())
        self._print(outstring)
        np.save(self.projectDirectory + 'Frames/Frame_' + str(self.frameCounter).zfill(6) + '.npy', med)
        matplotlib.image.imsave(self.projectDirectory + 'Frames/Frame_' + str(self.frameCounter).zfill(6) + '.jpg', color)
        self.frameCounter += 1
        return med

    def _uploadFiles(self):
        """Finish video conversions, build PrepFiles, and upload everything."""
        self._modifyPiGS('Status', 'Finishing converting and uploading of videos')
        for p in self.processes:
            p.communicate()
        # Convert any h264 files that were never processed
        for movieFile in os.listdir(self.videoDirectory):
            if '.h264' in movieFile:
                command = ['python3', 'unit_scripts/process_video.py', self.videoDirectory + movieFile]
                command += [str(self.camera.framerate[0]), self.projectID]
                self._print(command)
                self.processes.append(subprocess.Popen(command))
        for p in self.processes:
            p.communicate()
        self._modifyPiGS('Status', 'Creating prep files')
        # Move files around as appropriate
        prepDirectory = self.projectDirectory + 'PrepFiles/'
        if os.path.exists(prepDirectory):
            shutil.rmtree(prepDirectory)
        os.makedirs(prepDirectory)
        lp = LP(self.loggerFile)
        self.frameCounter = lp.lastFrameCounter + 1
        videoObj = [x for x in lp.movies if x.startTime.hour >= 8 and x.startTime.hour <= 20][0]
        subprocess.call(['cp', self.projectDirectory + videoObj.pic_file, prepDirectory + 'PiCameraRGB.jpg'])
        subprocess.call(['cp', self.projectDirectory + lp.movies[-1].pic_file, prepDirectory + 'LastPiCameraRGB.jpg'])
        # Find depthfile that is closest to the video file time
        if self.device != 'None':
            depthObj = [x for x in lp.frames if x.time > videoObj.startTime][0]
            subprocess.call(['cp', self.projectDirectory + depthObj.pic_file, prepDirectory + 'DepthRGB.jpg'])
            if not os.path.isdir(self.frameDirectory):
                self._modifyPiGS('Status', 'Error: ' + self.frameDirectory + ' does not exist.')
                return
            subprocess.call(['cp', self.frameDirectory + 'Frame_000001.npy', prepDirectory + 'FirstDepth.npy'])
            subprocess.call(['cp', self.frameDirectory + 'Frame_' + str(self.frameCounter - 1).zfill(6) + '.npy', prepDirectory + 'LastDepth.npy'])
        try:
            self._modifyPiGS('Status', 'Uploading data to cloud')
            if self.device != 'None':
                self.fileManager.uploadData(self.frameDirectory, tarred=True)
            self.fileManager.uploadData(prepDirectory)
            self.fileManager.uploadData(self.videoDirectory)
            self.fileManager.uploadData(self.loggerFile)
            self._modifyPiGS('Error', 'UploadSuccessful, ready for delete')
        except Exception as e:
            print('UploadError: ' + str(e))
            self._modifyPiGS('Error', 'UploadFailed, Need to rerun')
            raise  # re-raise the original exception (keeps the traceback)

    def _closeFiles(self):
        """Best-effort close of the log file and the mac caffeinate process."""
        try:
            self._print('MasterRecordStop: ' + str(datetime.datetime.now()))
            self.lf.close()
        except AttributeError:
            pass
        try:
            if self.system == 'mac':
                self.caff.kill()
        except AttributeError:
            pass