def __init__(self, logfile):
    self.lp = LP(logfile)
    self.fileManager = FM(projectID = self.lp.projectID)
    self.node = self.lp.uname.split("node='")[1].split("'")[0]
    self.lastFrameTime = self.lp.frames[-1].time
    self.masterDirectory = self.fileManager.localMasterDir
    self.projectDirectory = self.fileManager.localProjectDir
    self.credentialDrive = self.fileManager.localCredentialDrive
    self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet

    self._createImage()
    f = self.uploadImage(self.projectDirectory + self.lp.tankID + '.jpg', self.lp.tankID)
    self.insertImage(f)
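# A hedged illustration of the node parsing above; the exact uname string is an
# assumed example of platform.uname()'s repr, not recovered from the logs:
#   uname = "uname_result(system='Linux', node='raspberrypi', release='...')"
#   uname.split("node='")[1].split("'")[0]  ->  'raspberrypi'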
def __init__(self, projectID=None, modelID=None, workers=None, summaryFile=None):
    self.projectID = projectID
    self.fileManager = FM(projectID=projectID, modelID=modelID, summaryFile=summaryFile)
    self.modelID = modelID
    # str() so the message doesn't fail when projectID is None
    if not self._checkProjectID():
        raise Exception(str(projectID) + ' is not valid.')
    self.workers = workers
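# A minimal usage sketch (the class name and projectID here are illustrative
# assumptions, not taken from the source):
#   pp = ProjectAnalyzer(projectID='MC6_5', workers=4)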
def __init__(self):
    # 1: Define valid commands and ignore warnings
    self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete', 'Snapshots']
    np.seterr(invalid='ignore')

    # 2: Determine which Kinect is attached (This script can handle v1 or v2 Kinects)
    self._identifyDevice()  # Stored in self.device
    self.system = platform.node()

    # 3: Create file manager
    self.fileManager = FM()

    # 4: Download credential files
    self.fileManager.downloadData(self.fileManager.localCredentialSpreadsheet)
    self.fileManager.downloadData(self.fileManager.localCredentialDrive)
    self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable

    # 5: Connect to Google Spreadsheets
    self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
    self._modifyPiGS(error='')

    # 6: Start PiCamera
    self.camera = PiCamera()
    self.camera.resolution = (1296, 972)
    self.camera.framerate = 30
    self.piCamera = 'True'

    # 7: Keep track of processes spawned to convert and upload videofiles
    self.processes = []

    # 8: Set size of frame
    try:
        self.r
    except AttributeError:
        self.r = (0, 0, 640, 480)

    # 9: Await instructions
    self.monitorCommands()
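# An equivalent, more compact form of the step-8 default above (a sketch, not a
# change to the original):
#   self.r = getattr(self, 'r', (0, 0, 640, 480))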
def __init__(self, all_data):
    self.all_data = all_data  # Flag to keep all data if desired

    # 1: Define valid commands and ignore warnings
    self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete']
    np.seterr(invalid='ignore')

    # 2: Determine which depth sensor is attached (This script can handle DepthSense cameras)
    self._identifyDevice()  # Stored in self.device
    self.system = platform.node()

    # 3: Create file manager
    self.fileManager = FM()

    # 4: Start PiCamera
    self.camera = PiCamera()
    self.camera.resolution = (1296, 972)
    self.camera.framerate = 30
    self.piCamera = 'True'

    # 5: Download credential files
    self.fileManager.downloadData(self.fileManager.localCredentialDir)
    self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable
    self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
    self._identifyTank()  # Stored in self.tankID
    self._identifyServiceAccount()

    # 6: Keep track of processes spawned to convert and upload videofiles
    self.processes = []

    # 7: Set size of frame
    try:
        self.r
    except AttributeError:
        self.r = (0, 0, 640, 480)

    # 8: Await instructions
    print('Monitoring commands')
    self.monitorCommands()
import argparse, subprocess, pdb, shutil, os
import pandas as pd
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM

parser = argparse.ArgumentParser(description = 'This script is used to manually prepare projects for downstream analysis')
parser.add_argument('--SummaryFile', type = str, help = 'Restrict analysis to projectIDs specified in csv file, which will be rewritten. ProjectIDs must be found in a column called projectID')
parser.add_argument('--Start', type = int)
parser.add_argument('--Total', type = int)
args = parser.parse_args()

fm_obj = FM()

if args.SummaryFile is not None:
    summary_file = fm_obj.localAnalysisStatesDir + args.SummaryFile
    fm_obj.downloadData(summary_file)
    dt = pd.read_csv(summary_file, index_col = False)
    projectIDs = list(dt.projectID)
    if args.Start is not None:
        projectIDs = projectIDs[args.Start: args.Start + args.Total]
else:
    projectIDs = fm_obj.getAllProjectIDs()

for projectID in projectIDs:
    fm_obj = FM(projectID = projectID)
    print(projectID)
    lp = fm_obj.lp
    main_directory_data = subprocess.run(
        ['rclone', 'lsf', 'cichlidVideo:McGrath/Apps/CichlidPiData/' + '__ProjectData/' + projectID + '/'],
        capture_output = True, encoding = 'utf-8').stdout.split('\n')
    for bad_data in ['AllClips.tar', 'MLClips.tar', 'MLFrames.tar', 'Backgrounds.tar']:
        if bad_data in main_directory_data:
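# The loop body is truncated above; a hedged sketch of the likely cleanup step
# (the remote path mirrors the lsf call, but this line is an assumption, not
# recovered source):
#           subprocess.run(['rclone', 'delete', 'cichlidVideo:McGrath/Apps/CichlidPiData/__ProjectData/' + projectID + '/' + bad_data])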
parser.add_argument('-i', '--Initials', required = True, type = str, help = 'Initials to save annotations')
args = parser.parse_args()

numbers = {}

# Identify projects to run analysis on
if args.ProjectIDs is not None:
    projectIDs = args.ProjectIDs  # Specified at the command line
    for projectID in projectIDs:
        numbers[projectID] = args.Number
else:
    fm_obj = FM()
    summary_file = fm_obj.localAnalysisStatesDir + args.SummaryFile
    fm_obj.downloadData(summary_file)
    dt = pd.read_csv(summary_file, index_col = False)
    projectIDs = list(dt.projectID)  # Only run analysis on projects that need it
    for projectID in projectIDs:
        # .iloc[0] pulls the scalar out of the single-row selection so the
        # comparison and the stored count are unambiguous
        remaining = dt.loc[dt.projectID == projectID]['Labeled' + args.DataType].iloc[0]
        if remaining > 0:
            numbers[projectID] = remaining

# To run analysis efficiently, we download and upload data in the background while the main script runs
for projectID, number in numbers.items():
    print('Downloading: ' + projectID + ' ' + str(datetime.datetime.now()))
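    # A hedged sketch of the backgrounded download described in the comment
    # above (threading and the target directory are assumptions; the original
    # may spawn a subprocess instead; requires 'import threading' at the top):
    proj_fm = FM(projectID = projectID)
    dl_thread = threading.Thread(target = proj_fm.downloadData, args = (proj_fm.localLabeledClipsProjectDir,))
    dl_thread.start()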
import argparse, datetime, os, subprocess
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM

def logPrinter(message, indent = True):
    f = open(fileManager.localProjectDir + 'VideoProcessLog.txt', 'a')
    if indent:
        print(' ' + str(datetime.datetime.now()) + ': ' + str(message), file = f)
    else:
        print(str(datetime.datetime.now()) + ': ' + str(message), file = f)
    f.close()

parser = argparse.ArgumentParser()
parser.add_argument('VideoFile', type = str, help = 'Name of h264 file to be processed')
parser.add_argument('Framerate', type = float, help = 'Video framerate')
parser.add_argument('ProjectID', type = str, help = 'Project ID')
args = parser.parse_args()

fileManager = FM(projectID = args.ProjectID)

if '.h264' not in args.VideoFile:
    logPrinter(args.VideoFile + ' not an h264 file', indent = False)
    raise Exception(args.VideoFile + ' not an h264 file')

# Convert h264 to mp4
if os.path.exists(args.VideoFile.replace('.h264', '.mp4')):
    logPrinter(args.VideoFile.replace('.h264', '.mp4') + ' already exists. Deleting')
    subprocess.run(['rm', '-f', args.VideoFile.replace('.h264', '.mp4')])

command = ['ffmpeg', '-r', str(args.Framerate), '-i', args.VideoFile, '-threads', '1', '-c:v', 'copy', '-r', str(args.Framerate), args.VideoFile.replace('.h264', '.mp4')]
logPrinter('Beginning conversion of video: ' + args.VideoFile.split('/')[-1], indent = False)
logPrinter(command)
ffmpeg_output = subprocess.run(command, capture_output = True)
try:
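# A hedged sketch of a post-conversion check that could follow the conversion
# (not in the original script; assumes ffprobe ships alongside ffmpeg):
#   probe = subprocess.run(
#       ['ffprobe', '-v', 'error', '-count_frames', '-select_streams', 'v:0',
#        '-show_entries', 'stream=nb_read_frames', '-of', 'csv=p=0',
#        args.VideoFile.replace('.h264', '.mp4')],
#       capture_output = True, encoding = 'utf-8')
#   logPrinter('Frames in converted mp4: ' + probe.stdout.strip(), indent = False)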
import argparse, subprocess
import pandas as pd
from cichlid_bower_tracking.helper_modules.object_labeler import AnnotationDisagreements as AD
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM

parser = argparse.ArgumentParser()
parser.add_argument('User1', type = str, help = 'Initials of user annotations to compare')
parser.add_argument('User2', type = str, help = 'Initials of user annotations to compare')
parser.add_argument('ProjectID', type = str, help = 'Project to analyze')
parser.add_argument('-p', '--Practice', action = 'store_true', help = 'Use if you dont want to save annotations')
parser.add_argument('-a', '--All', action = 'store_true')  # Assumed flag: args.All is referenced below but was missing from the original parser
args = parser.parse_args()

fm_obj = FM(projectID = args.ProjectID)
fm_obj.downloadData(fm_obj.localLabeledClipsProjectDir, tarred = True)
fm_obj.downloadData(fm_obj.localBoxedFishFile)

# Hypothetical temp csv for the disagreement annotations; the original snippet
# referenced temp_dt without defining it
temp_dt = fm_obj.localProjectDir + 'NewAnnotations.csv'

ad_obj = AD(fm_obj.localBoxedFishDir + args.ProjectID + '/', temp_dt, args.ProjectID, args.User1, args.User2, args.All)

# Redownload csv in case new annotations have been added
fm_obj.downloadData(fm_obj.localBoxedFishFile)
old_dt = pd.read_csv(fm_obj.localBoxedFishFile, index_col = 0)
new_dt = pd.read_csv(temp_dt)
# pd.concat replaces the deprecated DataFrame.append; sort must be the boolean
# False, not the string 'False'
old_dt = pd.concat([old_dt, new_dt], sort = False).drop_duplicates(subset = ['ProjectID', 'Framefile', 'User', 'Sex', 'Box'], keep = 'last').sort_values(by = ['ProjectID', 'Framefile'])
old_dt.to_csv(fm_obj.localBoxedFishFile, sep = ',', columns = ['ProjectID', 'Framefile', 'Nfish', 'Sex', 'Box', 'CorrectAnnotation', 'User', 'DateTime'])

if not args.Practice:
import argparse
import pandas as pd
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM

parser = argparse.ArgumentParser(description='This script is used to determine analysis states for each project.')
parser.add_argument('--SummaryFile', type=str, help='Restrict analysis to projectIDs specified in csv file, which will be rewritten. ProjectIDs must be found in a column called projectID')
args = parser.parse_args()

fm_obj = FM(summaryFile=args.SummaryFile)

if args.SummaryFile is not None:
    summary_file = fm_obj.localSummaryFile
    fm_obj.downloadData(summary_file)
    dt = pd.read_csv(summary_file, index_col=False)
    projectIDs = list(dt.projectID)
else:
    fm_obj.createDirectory(fm_obj.localAnalysisStatesDir)
    summary_file = fm_obj.localAnalysisStatesDir + 'AllProjects.csv'
    projectIDs = fm_obj.getAllProjectIDs()
    dt = pd.DataFrame(columns=['projectID', 'tankID', 'StartingFiles', 'Prep', 'Depth', 'Cluster', 'ClusterClassification', 'LabeledVideos', 'LabeledFrames', 'Summary'])
import subprocess, gspread, pdb
from cichlid_bower_tracking.helper_modules.file_manager import FileManager as FM
import pandas as pd

# Requires ttab https://www.npmjs.com/package/ttab#manual-installation

fileManager = FM()
fileManager.downloadData(fileManager.localCredentialDir)

gs = gspread.service_account(filename=fileManager.localCredentialSpreadsheet)
controllerGS = gs.open('Controller')
pi_ws = controllerGS.worksheet('RaspberryPi')
data = pi_ws.get_all_values()
dt = pd.DataFrame(data[1:], columns=data[0])

for row in dt.RaspberryPiID:
    print(row)
    #subprocess.run(['ssh-keygen', '-t', 'rsa', '-f', '~/.ssh/id_rsa'])
    #subprocess.run(['ssh-copy-id', 'pi@' + row + '.biosci.gatech.edu'])

for row in dt.RaspberryPiID:
    subprocess.run(['ttab', '-t', row, 'ssh', 'pi@' + row + '.biosci.gatech.edu'])
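# A hedged note on the commented-out key setup above: ssh-keygen only needs to
# run once per controller machine, and subprocess does not expand '~', so a
# working one-time setup might look like this (os.path.expanduser is an
# addition, not recovered source; requires 'import os'):
#   subprocess.run(['ssh-keygen', '-t', 'rsa', '-N', '', '-f', os.path.expanduser('~/.ssh/id_rsa')])
#   for row in dt.RaspberryPiID:
#       subprocess.run(['ssh-copy-id', 'pi@' + row + '.biosci.gatech.edu'])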