Example No. 1
    def __init__(self, projectID, workers=None):

        self.projectID = projectID
        self.workers = workers
        self.fileManager = FM()
        self.projFileManager = self.fileManager.retProjFileManager(projectID)
        self.mlFileManager = self.fileManager.retMLFileManager()
Example No. 2
    def __init__(self, projectID, workers=None):

        self.projectID = projectID
        if not self._checkProjectID():
            raise Exception(projectID + ' is not valid.')
        self.workers = workers
        self.fileManager = FM(projectID=projectID)
Example No. 3
    def __init__(self):

        # 1: Define valid commands and ignore warnings
        self.commands = [
            'New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete',
            'Snapshots'
        ]
        np.seterr(invalid='ignore')

        # 2: Determine which Kinect is attached (This script can handle v1 or v2 Kinects)
        self._identifyDevice()  #Stored in self.device

        # 3: Create file manager
        self.fileManager = FM()

        # 4: Download credential files
        self.fileManager.downloadData(
            self.fileManager.localCredentialSpreadsheet)
        self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable

        # 5: Identify credential files (Credential files for uploading updates to Google Drive are found here)
        self.credentialSpreadsheet = self.masterDirectory + 'CredentialFiles/SAcredentials.json'

        # 6: Connect to Google Spreadsheets
        self._authenticateGoogleSpreadSheets()  #Creates self.controllerGS
        self._modifyPiGS(error='')

        # 7: Start PiCamera
        try:
            from picamera import PiCamera
            self.camera = PiCamera()
            self.camera.resolution = (1296, 972)
            self.camera.framerate = 30
            self.piCamera = 'True'
        except Exception:
            self.piCamera = 'False'

        # 8: Keep track of processes spawned to convert and upload videofiles
        self.processes = []

        # 9: Await instructions
        self.monitorCommands()
Example No. 4
    def __init__(self):

        # 1: Define valid commands and ignore warnings
        self.commands = [
            'New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete',
            'Snapshots'
        ]
        np.seterr(invalid='ignore')

        # 2: Determine which Kinect is attached (This script can handle v1 or v2 Kinects)
        self._identifyDevice()  #Stored in self.device
        self.system = platform.node()

        # 3: Create file manager
        self.fileManager = FM()

        # 4: Download credential files
        self.fileManager.downloadData(
            self.fileManager.localCredentialSpreadsheet)
        self.fileManager.downloadData(self.fileManager.localCredentialDrive)
        self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable

        # 5: Connect to Google Spreadsheets
        self._authenticateGoogleSpreadSheets()  #Creates self.controllerGS
        self._modifyPiGS(error='')

        # 6: Start PiCamera
        self.camera = PiCamera()
        self.camera.resolution = (1296, 972)
        self.camera.framerate = 30
        self.piCamera = 'True'

        # 7: Keep track of processes spawned to convert and upload videofiles
        self.processes = []

        # 8: Set size of frame
        self.r = (0, 0, 640, 480)

        # 9: Await instructions
        self.monitorCommands()
Example No. 5
from Modules.FileManagers.FileManager import FileManager as FM
import subprocess, pdb
pdb.set_trace()
anFM_obj = FM().retAnFileManager()

labeledClipsDir = anFM_obj.prepareVideoAnnotation('10classLabels')
#labeledClipsDir = '/Users/pmcgrath7/Temp/CichlidAnalyzer/__AnnotatedData/LabeledVideos/10classLabels/LabeledClips/'
#pdb.set_trace()

#subprocess.run(['python3', 'Modules/MachineLearning/3D_resnet.py', '--data', labeledClipsDir])
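
A minimal runnable variant of this snippet, assuming the commented-out 3D_resnet.py call above is the intended next step and dropping the pdb trap:

from Modules.FileManagers.FileManager import FileManager as FM
import subprocess

# Prepare the labeled clips, then hand the directory to the training script
anFM_obj = FM().retAnFileManager()
labeledClipsDir = anFM_obj.prepareVideoAnnotation('10classLabels')
subprocess.run(['python3', 'Modules/MachineLearning/3D_resnet.py', '--data', labeledClipsDir])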
Example No. 6
                    '--Practice',
                    action='store_true',
                    help='Use if you don\'t want to save annotations')
parser.add_argument('-i',
                    '--Initials',
                    type=str,
                    help='Initials to save annotations')

args = parser.parse_args()

if args.Initials is None:
    initials = socket.gethostname()
else:
    initials = args.Initials

fm_obj = FM(projectID=args.ProjectID)
fm_obj.createDirectory(fm_obj.localAnalysisDir)
fm_obj.downloadData(fm_obj.localManualLabelClipsDir, tarred=True)
fm_obj.downloadData(fm_obj.localLabeledClipsFile)

temp_csv = fm_obj.localAnalysisDir + 'NewAnnotations.csv'

# Read in annotations and create csv file for all annotations with the same user and projectID
dt = pd.read_csv(fm_obj.localLabeledClipsFile, index_col='LID')
new_dt = pd.DataFrame(columns=dt.columns)
clips = [
    x for x in os.listdir(fm_obj.localManualLabelClipsDir)
    if 'ManualLabel.mp4' in x
]

categories = ['c', 'f', 'p', 't', 'b', 'm', 's', 'x', 'o', 'd', 'q', 'k']
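
A minimal sketch of how the downloaded clips might then be labeled with these category codes, assuming a plain input() loop; the loop itself and the column names ('ClipName', 'ManualLabel', 'MLabeler') are assumptions of this sketch, not the original script:

annotations = []
for clip in clips:
    # Prompt the annotator for one of the single-letter category codes defined above
    label = ''
    while label not in categories:
        label = input(clip + ' label (' + '/'.join(categories) + '): ').strip()
    annotations.append({'ClipName': clip, 'ManualLabel': label, 'MLabeler': initials})  # hypothetical columns

if not args.Practice:
    # Write the new annotations next to the existing labeled-clips file
    pd.DataFrame(annotations).to_csv(temp_csv, index=False)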
Example No. 7
parser = argparse.ArgumentParser(
    description='This command runs HMM analysis on a single row of data.')
parser.add_argument('ProjectID', type=str, help='ProjectID to analyze')
parser.add_argument('-n',
                    '--Number',
                    type=int,
                    help='Limit annotation to x number of frames.')
parser.add_argument('-p',
                    '--Practice',
                    action='store_true',
                    help='Use if you don\'t want to save annotations')

args = parser.parse_args()

fileManager = FM()
projFileManager = fileManager.retProjFileManager(args.ProjectID)
projFileManager.downloadData('ObjectLabeler')

anFileManager = fileManager.retAnFileManager()

obj = ObjectLabeler(projFileManager.localManualLabelFramesDir,
                    projFileManager.localLabeledFramesFile, args.Number,
                    args.ProjectID)

if not args.Practice:
    # Backup annotations. Redownload to avoid race conditions
    if not os.path.exists(projFileManager.localLabeledFramesFile):
        print(projFileManager.localLabeledFramesFile +
              ' does not exist. Did you annotate any new frames? Quitting...')
    else:
Example No. 8
class ProjectPreparer():
    # This class takes in a projectID and runs all the appropriate analysis

    def __init__(self, projectID, workers=None):

        self.projectID = projectID
        self.workers = workers
        self.fileManager = FM()
        self.projFileManager = self.fileManager.retProjFileManager(projectID)
        self.mlFileManager = self.fileManager.retMLFileManager()

    def downloadData(self, dtype):
        self.fileManager.createDirs()
        self.projFileManager.downloadData(dtype)
        if dtype in ['Download', 'MLClassification']:
            self.mlFileManager.downloadData()

    def runPrepAnalysis(self):
        self.fileManager.createDirs()
        self.projFileManager.downloadData('Prep')
        prp_obj = PrP(self.projFileManager)
        prp_obj.validateInputData()
        prp_obj.prepData()
        self.createUploadFile(prp_obj.uploads)
        self.createAnalysisUpdate('Prep', prp_obj)
        self.backupAnalysis()
        #self.localDelete()

    def runDepthAnalysis(self):
        dp_obj = DP(self.projFileManager, self.workers)
        dp_obj.validateInputData()
        dp_obj.createSmoothedArray()
        dp_obj.createRGBVideo()
        self.createUploadFile(dp_obj.uploads)
        self.createAnalysisUpdate('Depth', dp_obj)

    def runClusterAnalysis(self, videoIndex):
        cp_obj = CP(self.projFileManager, self.workers, videoIndex)
        cp_obj.validateInputData()
        cp_obj.runClusterAnalysis()
        self.createUploadFile(cp_obj.uploads)
        self.createAnalysisUpdate('Cluster', cp_obj)

    def createAnnotationFrames(self):
        cp_obj = CP(self.projFileManager, self.workers)
        cp_obj.validateInputData()
        cp_obj.createAnnotationFrames()
        self.createUploadFile(cp_obj.uploads)

    def runMLClusterClassifier(self):
        mlc_obj = MLP(self.projFileManager, self.mlFileManager)
        mlc_obj.validateInputData()
        mlc_obj.predictVideoLabels()
        self.createUploadFile(mlc_obj.uploads)
        self.createAnalysisUpdate('MLClassifier', mlc_obj)

    def runMLFishDetection(self):
        pass

    def runFiguresCreation(self):
        fc_obj = FC(self.projFileManager)
        fc_obj.validateInputData()

        self.createUploadFile(fc_obj.uploads)
        self.createAnalysisUpdate('Figures', fc_obj)

    def runObjectLabeling(self):
        self.projFileManager.downloadData('ObjectLabeler')
        lc_obj = LC(self.projFileManager)
        lc_obj.validateInputData()

    def backupAnalysis(self):
        uploadCommands = set()

        uploadFiles = [
            x for x in os.listdir(self.fileManager.localUploadDir)
            if 'UploadData' in x
        ]

        for uFile in uploadFiles:
            with open(self.fileManager.localUploadDir + uFile) as f:
                line = next(f)
                for line in f:
                    tokens = line.rstrip().split(',')
                    tokens[2] = bool(int(tokens[2]))
                    uploadCommands.add(tuple(tokens))

        for command in uploadCommands:
            self.fileManager.uploadData(command[0], command[1], command[2])

        for uFile in uploadFiles:
            subprocess.run(
                ['rm', '-rf', self.fileManager.localUploadDir + uFile])

        self.fileManager.uploadData(self.fileManager.localAnalysisLogDir,
                                    self.fileManager.cloudAnalysisLogDir,
                                    False)
        subprocess.run(['rm', '-rf', self.projFileManager.localMasterDir])

    def localDelete(self):
        subprocess.run(['rm', '-rf', self.projFileManager.localMasterDir])

    def createUploadFile(self, uploads):
        with open(
                self.fileManager.localUploadDir + 'UploadData_' +
                str(datetime.datetime.now().timestamp()) + '.csv', 'w') as f:
            print('Local,Cloud,Tar', file=f)
            for upload in uploads:
                print(upload[0] + ',' + upload[1] + ',' + str(upload[2]),
                      file=f)

    def createAnalysisUpdate(self, aType, procObj):
        now = datetime.datetime.now()
        with open(
                self.fileManager.localAnalysisLogDir + 'AnalysisUpdate_' +
                str(now.timestamp()) + '.csv', 'w') as f:
            print('ProjectID,Type,Version,Date', file=f)
            print(self.projectID + ',' + aType + ',' + procObj.__version__ +
                  '_' + os.getenv('USER') + ',' + str(now),
                  file=f)
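
A minimal usage sketch for this class; the projectID, worker count, and video index below are hypothetical placeholders, and 'Download' is one of the dtype values referenced in downloadData above:

pp_obj = ProjectPreparer('MC6_5', workers=4)  # hypothetical projectID and worker count
pp_obj.downloadData('Download')  # also pulls the ML files (see downloadData above)
pp_obj.runDepthAnalysis()
pp_obj.runClusterAnalysis(0)  # placeholder video index
pp_obj.backupAnalysis()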
Example No. 9
                    type=str,
                    help='Name of h264 file to be processed')
parser.add_argument('Framerate', type=float, help='Video framerate')

args = parser.parse_args()

fileManager = FM()

if '.h264' not in args.VideoFile:
    raise Exception(args.VideoFile + ' not an h264 file')

# Convert h264 to mp4
ffmpeg_output = subprocess.run([
    'ffmpeg', '-r',
    str(args.Framerate), '-i', args.VideoFile, '-threads', '1', '-c:v', 'copy',
    '-r',
    str(args.Framerate),
    args.VideoFile.replace('.h264', '.mp4')
],
                               capture_output=True,
                               encoding='utf-8')
assert os.path.isfile(args.VideoFile.replace('.h264', '.mp4'))
assert os.path.getsize(args.VideoFile.replace(
    '.h264', '.mp4')) > os.path.getsize(args.VideoFile)

# Sync with cloud (will return error if something goes wrong)
fileManager.uploadData(args.VideoFile.replace('.h264', '.mp4'))

# Move the original h264 file to the backup directory
subprocess.run(['mv', args.VideoFile, '../Backups/'])
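
A minimal sketch of the same ffmpeg step wrapped as a reusable helper; the function name and the check=True error handling are choices of this sketch, not part of the original script:

import os
import subprocess

def convert_h264_to_mp4(video_file, framerate):
    # Remux the raw h264 stream into an mp4 container at the requested framerate
    mp4_file = video_file.replace('.h264', '.mp4')
    subprocess.run(['ffmpeg', '-r', str(framerate), '-i', video_file,
                    '-threads', '1', '-c:v', 'copy', '-r', str(framerate), mp4_file],
                   capture_output=True, encoding='utf-8', check=True)
    assert os.path.isfile(mp4_file)
    return mp4_file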
Example No. 10
    def __init__(self):
        __version__ = '1.0.0'
        self.fileManager = FM()
        self.projectTypes = [
            'Prep', 'Depth', 'Cluster', 'MLCluster', 'MLObject', 'Summary'
        ]
Example No. 11
class AnalysisPreparer:
    def __init__(self):
        __version__ = '1.0.0'
        self.fileManager = FM()
        self.projectTypes = [
            'Prep', 'Depth', 'Cluster', 'MLCluster', 'MLObject', 'Summary'
        ]

    def updateAnalysisFile(self, newProjects=True, projectSummary=True):
        self._loadAnalysisDir()
        if newProjects:
            self._identifyNewProjects()
        self._mergeUpdates()
        if projectSummary:
            self._createProjectsSummary()
        self.fileManager.uploadData(self.fileManager.localAnalysisSummaryFile,
                                    self.fileManager.cloudAnalysisLogDir,
                                    False)

    def checkProjects(self, projects):
        self._loadAnalysisDir()
        self._createProjectsSummary(print_screen=False)
        badProject = False
        for project in projects:
            if project not in self.info['All']:
                print(project + ' not a valid project')
                badProject = True
        return badProject

    def _loadAnalysisDir(self):
        self.fileManager.downloadDirectory(self.fileManager.analysisDir)
        self.anDT = pd.read_excel(self.fileManager.localAnalysisSummaryFile,
                                  index_col=0,
                                  sheet_name='Master')

    def _identifyNewProjects(self):
        necessaryFiles = [
            'Logfile.txt', 'Frames.tar', 'Videos/', 'PrepFiles/DepthRGB.jpg',
            'PrepFiles/FirstDepth.npy', 'PrepFiles/LastDepth.npy',
            'PrepFiles/PiCameraRGB.jpg'
        ]
        goodProjects = set()

        if os.path.exists(self.fileManager.mountedDropboxMasterDir):
            print('Collecting directories using locally mounted Dropbox')

            root, subdirs, files = next(
                os.walk(self.fileManager.mountedDropboxMasterDir)
            )  # Get first set of directories in master file

            for projectID in subdirs:
                goodProject = True
                for nFile in necessaryFiles:
                    if not os.path.exists(
                            self.fileManager.mountedDropboxMasterDir +
                            projectID + '/' + nFile):
                        goodProject = False

                if goodProject:
                    goodProjects.add(projectID)
        else:
            print('Collecting directories using rclone')

            projectData = subprocess.run([
                'rclone', 'lsf', '-R', '--max-depth', '3',
                self.fileManager.cloudMasterDir
            ],
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE)
            projectData = projectData.stdout.decode().split('\n')

            potentialProjects = set()
            for directory in projectData:
                potentialProjects.add(directory.split('/')[0])

            for projectID in sorted(potentialProjects):
                projectFiles = [projectID + '/' + x for x in necessaryFiles]
                goodProject = True

                for nFile in projectFiles:
                    if nFile not in projectData:
                        goodProject = False

                if goodProject:
                    goodProjects.add(projectID)

        analysisData = collections.defaultdict(list)
        projectIDs = []
        for projectID in sorted(goodProjects):
            if projectID not in self.anDT.index:
                projectIDs.append(projectID)
                analysisData['Prep_Version'].append('None')
                analysisData['Prep_Date'].append('None')

                analysisData['Depth_Version'].append('None')
                analysisData['Depth_Date'].append('None')

                analysisData['Cluster_Version'].append('None')
                analysisData['Cluster_Date'].append('None')

                analysisData['MLCluster_Version'].append('None')
                analysisData['MLCluster_Date'].append('None')

                analysisData['MLObject_Version'].append('None')
                analysisData['MLObject_Date'].append('None')

                analysisData['Summary_Version'].append('None')
                analysisData['Summary_Date'].append('None')

        dt = pd.DataFrame(analysisData, index=sorted(projectIDs))
        self.anDT = pd.concat([self.anDT, dt])
        self.anDT.to_excel(self.fileManager.localAnalysisSummaryFile,
                           sheet_name='Master',
                           index=True)

    def _mergeUpdates(self):

        updateFiles = [
            x for x in os.listdir(self.fileManager.localAnalysisLogDir)
            if 'AnalysisUpdate' in x
        ]
        if updateFiles == []:
            return
        updateDTs = []
        for update in updateFiles:
            updateDTs.append(
                pd.read_csv(self.fileManager.localAnalysisLogDir + update,
                            sep=','))
        allUpdates = pd.concat(updateDTs)
        allUpdates.Date = pd.to_datetime(allUpdates.Date,
                                         format='%Y-%m-%d %H:%M:%S.%f')
        allUpdates = allUpdates.sort_values(['ProjectID', 'Date'
                                             ]).groupby(['ProjectID',
                                                         'Type']).last()
        for index, row in allUpdates.iterrows():
            self.anDT.loc[index[0], index[1] + '_Version'] = row.Version
            self.anDT.loc[index[0], index[1] + '_Date'] = row.Date

        self.anDT.to_excel(self.fileManager.localAnalysisSummaryFile,
                           sheet_name='Master',
                           index=True)
        for update in updateFiles:
            subprocess.run(
                ['rm', '-f', self.fileManager.localAnalysisLogDir + update])
            subprocess.run([
                'rclone', 'delete',
                self.fileManager.cloudAnalysisLogDir + update
            ])

    def _createProjectsSummary(self, print_screen=False):
        self.info = {}
        self.info['All'] = list(self.anDT.index)
        for analysis in self.projectTypes:
            self.info[analysis] = list(
                self.anDT[self.anDT[analysis + '_Version'] == 'None'].index)

        if print_screen:
            print('AllProjects: ' + ','.join(self.info['All']))
            for analysis in self.projectTypes:
                print('Unanalyzed ' + analysis + ': ' +
                      ','.join(self.info[analysis]))
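
A minimal usage sketch for AnalysisPreparer; the projectID passed to checkProjects is a hypothetical placeholder:

ap_obj = AnalysisPreparer()
ap_obj.updateAnalysisFile(newProjects=True, projectSummary=True)
if ap_obj.checkProjects(['MC6_5']):  # hypothetical projectID
    print('At least one projectID is missing from the analysis summary')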
Example No. 12
class CichlidTracker:
    def __init__(self):

        # 1: Define valid commands and ignore warnings
        self.commands = [
            'New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete',
            'Snapshots'
        ]
        np.seterr(invalid='ignore')

        # 2: Determine which Kinect is attached (This script can handle v1 or v2 Kinects)
        self._identifyDevice()  #Stored in self.device

        # 3: Create file manager
        self.fileManager = FM()

        # 4: Download credential files
        self.fileManager.downloadData(
            self.fileManager.localCredentialSpreadsheet)
        self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable

        # 5: Identify credential files (Credential files for uploading updates to Google Drive are found here)
        self.credentialSpreadsheet = self.masterDirectory + 'CredentialFiles/SAcredentials.json'

        # 6: Connect to Google Spreadsheets
        self._authenticateGoogleSpreadSheets()  #Creates self.controllerGS
        self._modifyPiGS(error='')

        # 7: Start PiCamera
        try:
            from picamera import PiCamera
            self.camera = PiCamera()
            self.camera.resolution = (1296, 972)
            self.camera.framerate = 30
            self.piCamera = 'True'
        except Exception:
            self.piCamera = 'False'

        # 8: Keep track of processes spawned to convert and upload videofiles
        self.processes = []

        # 9: Await instructions
        self.monitorCommands()

    def __del__(self):
        # Try to close out files and stop running Kinects
        self._modifyPiGS(command='None',
                         status='Stopped',
                         error='UnknownError')
        if self.piCamera:
            if self.camera.recording:
                self.camera.stop_recording()
                self._print('PiCameraStopped: Time=' +
                            str(datetime.datetime.now()) + ', File=Videos/' +
                            str(self.videoCounter).zfill(4) + "_vid.h264")

        try:
            if self.device == 'kinect2':
                self.K2device.stop()
            if self.device == 'kinect':
                freenect.sync_stop()
                freenect.shutdown(self.a)
        except AttributeError:
            pass
        self._closeFiles()

    def monitorCommands(self, delta=10):
        # This function checks the master Controller Google Spreadsheet to determine if a command was issued (delta = seconds to recheck)
        while True:
            self._identifyTank()  #Stored in self.tankID
            command, projectID = self._returnCommand()
            if projectID in ['', 'None']:
                self._reinstructError('ProjectID must be set')
                time.sleep(delta)
                continue

            print(command + '\t' + projectID)
            if command != 'None':
                self.fileManager.createProjectID(projectID)
                self.runCommand(command, projectID)
            self._modifyPiGS(status='AwaitingCommand')
            time.sleep(delta)

    def runCommand(self, command, projectID):
        # This function is used to run a specific command found in the master Controller Google Spreadsheet
        self.projectID = projectID

        # Rename files to make code more readable
        self.projectDirectory = self.fileManager.localProjectDir
        self.loggerFile = self.fileManager.localLogfile
        self.frameDirectory = self.fileManager.localFrameDir
        self.backgroundDirectory = self.fileManager.localBackgroundDir
        self.videoDirectory = self.fileManager.localVideoDir

        if command not in self.commands:
            self._reinstructError(command +
                                  ' is not a valid command. Options are ' +
                                  str(self.commands))

        if command == 'Stop':

            if self.piCamera:
                if self.camera.recording:
                    self.camera.stop_recording()
                    self._print('PiCameraStopped: Time: ' +
                                str(datetime.datetime.now()) +
                                ',,File: Videos/' +
                                str(self.videoCounter).zfill(4) + "_vid.h264")

            try:
                if self.device == 'kinect2':
                    self.K2device.stop()
                if self.device == 'kinect':
                    freenect.sync_stop()
                    freenect.shutdown(self.a)
            except:
                self._print('ErrorStopping kinect')

            command = [
                'python3', 'Modules/processVideo.py', self.videoDirectory +
                str(self.videoCounter).zfill(4) + '_vid.h264'
            ]
            command += [
                self.loggerFile, self.projectDirectory,
                self.cloudVideoDirectory
            ]
            self._print(command)
            self.processes.append(subprocess.Popen(command))

            self._closeFiles()

            self._modifyPiGS(command='None', status='AwaitingCommand')
            return

        if command == 'UploadData':

            self._modifyPiGS(command='None')
            self._uploadFiles()
            return

        if command == 'LocalDelete':
            if os.path.exists(self.projectDirectory):
                shutil.rmtree(self.projectDirectory)
            self._modifyPiGS(command='None', status='AwaitingCommand')
            return

        self._modifyPiGS(command='None', status='Running', error='')

        if command == 'New':
            # Project Directory should not exist. If it does, report error
            if os.path.exists(self.projectDirectory):
                self._reinstructError(
                    'New command cannot be run if output directory already exists. Use Rewrite or Restart'
                )

        if command == 'Rewrite':
            if os.path.exists(self.projectDirectory):
                shutil.rmtree(self.projectDirectory)
            os.mkdir(self.projectDirectory)
            #subprocess.call([self.dropboxScript, '-f', self.credentialDropbox, 'delete', projectID], stdout = open(self.projectDirectory + 'DropboxDeleteOut.txt', 'a'), stderr = open(self.projectDirectory + 'DropboxDeleteError.txt', 'a'))

        if command in ['New', 'Rewrite']:
            self.masterStart = datetime.datetime.now()
            if command == 'New':
                os.mkdir(self.projectDirectory)
            os.mkdir(self.frameDirectory)
            os.mkdir(self.backgroundDirectory)
            os.mkdir(self.videoDirectory)
            #self._createDropboxFolders()
            self.frameCounter = 1
            self.backgroundCounter = 1
            self.videoCounter = 1

        if command == 'Restart':
            logObj = LP.LogParser(self.loggerFile)
            self.masterStart = logObj.master_start
            self.r = logObj.bounding_shape
            self.frameCounter = logObj.lastFrameCounter + 1
            self.backgroundCounter = logObj.lastBackgroundCounter + 1
            self.videoCounter = logObj.lastVideoCounter + 1
            if self.system != logObj.system or self.device != logObj.device or self.piCamera != logObj.camera:
                self._reinstructError(
                    'Restart error. System, device, or camera does not match what is in logfile'
                )

        self.lf = open(self.loggerFile, 'a')
        self._modifyPiGS(start=str(self.masterStart))

        if command in ['New', 'Rewrite']:
            self._print('MasterStart: System: ' + self.system + ',,Device: ' +
                        self.device + ',,Camera: ' + str(self.piCamera) +
                        ',,Uname: ' + str(platform.uname()) + ',,TankID: ' +
                        self.tankID + ',,ProjectID: ' + self.projectID)
            self._print('MasterRecordInitialStart: Time: ' +
                        str(self.masterStart))
            self._print(
                'PrepFiles: FirstDepth: PrepFiles/FirstDepth.npy,,LastDepth: PrepFiles/LastDepth.npy,,PiCameraRGB: PiCameraRGB.jpg,,DepthRGB: DepthRGB.jpg'
            )

            self._createROI(useROI=False)

        else:
            self._print('MasterRecordRestart: Time: ' +
                        str(datetime.datetime.now()))

        # Start kinect
        self._start_kinect()

        # Diagnose speed
        self._diagnose_speed()

        # Capture data
        self.captureFrames()

    def captureFrames(self,
                      frame_delta=5,
                      background_delta=5,
                      max_frames=20,
                      stdev_threshold=20):

        current_background_time = datetime.datetime.now()
        current_frame_time = current_background_time + datetime.timedelta(
            seconds=60 * frame_delta)

        command = ''

        while True:
            self._modifyPiGS(command='None', status='Running', error='')
            # Grab new time
            now = datetime.datetime.now()

            # Fix camera if it needs to be
            if self.piCamera:
                if self._video_recording() and not self.camera.recording:
                    self.camera.capture(self.videoDirectory +
                                        str(self.videoCounter).zfill(4) +
                                        "_pic.jpg")
                    self.camera.start_recording(
                        self.videoDirectory + str(self.videoCounter).zfill(4) +
                        "_vid.h264",
                        bitrate=7500000)
                    self._print('PiCameraStarted: FrameRate: ' +
                                str(self.camera.framerate) + ',,Resolution: ' +
                                str(self.camera.resolution) + ',,Time: ' +
                                str(datetime.datetime.now()) +
                                ',,VideoFile: Videos/' +
                                str(self.videoCounter).zfill(4) +
                                '_vid.h264,,PicFile: Videos/' +
                                str(self.videoCounter).zfill(4) + '_pic.jpg')
                elif not self._video_recording() and self.camera.recording:
                    self.camera.stop_recording()
                    self._print('PiCameraStopped: Time: ' +
                                str(datetime.datetime.now()) +
                                ',, File: Videos/' +
                                str(self.videoCounter).zfill(4) + "_vid.h264")
                    #self._print(['rclone', 'copy', self.videoDirectory + str(self.videoCounter).zfill(4) + "_vid.h264"])
                    command = [
                        'python3', 'Modules/processVideo.py',
                        self.videoDirectory + str(self.videoCounter).zfill(4) +
                        '_vid.h264'
                    ]
                    command += [
                        self.loggerFile, self.projectDirectory,
                        self.cloudVideoDirectory
                    ]
                    self._print(command)
                    self.processes.append(subprocess.Popen(command))
                    self.videoCounter += 1

            # Capture a frame and background if necessary

            if now > current_background_time:
                if command == 'Snapshots':
                    out = self._captureFrame(current_frame_time,
                                             new_background=True,
                                             max_frames=max_frames,
                                             stdev_threshold=stdev_threshold,
                                             snapshots=True)
                else:
                    out = self._captureFrame(current_frame_time,
                                             new_background=True,
                                             max_frames=max_frames,
                                             stdev_threshold=stdev_threshold)
                if out is not None:
                    current_background_time += datetime.timedelta(
                        seconds=60 * background_delta)
                subprocess.Popen(
                    ['python3', 'Modules/DriveUpdater.py', self.loggerFile])
            else:
                if command == 'Snapshots':
                    out = self._captureFrame(current_frame_time,
                                             new_background=False,
                                             max_frames=max_frames,
                                             stdev_threshold=stdev_threshold,
                                             snapshots=True)
                else:
                    out = self._captureFrame(current_frame_time,
                                             new_background=False,
                                             max_frames=max_frames,
                                             stdev_threshold=stdev_threshold)
            current_frame_time += datetime.timedelta(seconds=60 * frame_delta)

            self._modifyPiGS(status='Running')

            # Check google doc to determine if recording has changed.
            try:
                command, projectID = self._returnCommand()
            except KeyError:
                continue
            if command != 'None':
                if command == 'Snapshots':
                    self._modifyPiGS(command='None',
                                     status='Writing Snapshots')
                    continue
                else:
                    break
            else:
                self._modifyPiGS(error='')

    def _authenticateGoogleSpreadSheets(self):
        scope = [
            "https://spreadsheets.google.com/feeds",
            "https://www.googleapis.com/auth/spreadsheets"
        ]
        credentials = ServiceAccountCredentials.from_json_keyfile_name(
            self.credentialSpreadsheet, scope)
        for i in range(0, 3):  # Try to authenticate three times before failing
            try:
                gs = gspread.authorize(credentials)
            except:
                continue
            try:
                self.controllerGS = gs.open('Controller')
                pi_ws = self.controllerGS.worksheet('RaspberryPi')
            except:
                continue
            try:
                headers = pi_ws.row_values(1)
            except:
                continue
            column = headers.index('RaspberryPiID') + 1
            try:
                pi_ws.col_values(column).index(platform.node())
                return True
            except ValueError:
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                s.connect(("8.8.8.8", 80))
                ip = s.getsockname()[0]
                s.close()
                try:
                    pi_ws.append_row([
                        platform.node(), ip, '', '', '', '', '', 'None',
                        'Stopped', 'Error: Awaiting assignment of TankID',
                        str(datetime.datetime.now())
                    ])
                except:
                    continue
                return True
            except:
                continue
            time.sleep(2)
        return False

    def _identifyDevice(self):
        try:
            global freenect
            import freenect
            self.a = freenect.init()
            if freenect.num_devices(self.a) == 0:
                kinect = False
            elif freenect.num_devices(self.a) > 1:
                self._initError(
                    'Multiple Kinect1s attached. Unsure how to handle')
            else:
                kinect = True
        except ImportError:
            kinect = False

        try:
            global FN2
            import pylibfreenect2 as FN2
            if FN2.Freenect2().enumerateDevices() == 1:
                kinect2 = True
            elif FN2.Freenect2().enumerateDevices() > 1:
                self._initError(
                    'Multiple Kinect2s attached. Unsure how to handle')
            else:
                kinect2 = False
        except ImportError:
            kinect2 = False

        if kinect and kinect2:
            self._initError(
                'Kinect1 and Kinect2 attached. Unsure how to handle')
        elif not kinect and not kinect2:
            self._initError('No depth sensor attached')
        elif kinect:
            self.device = 'kinect'
        else:
            self.device = 'kinect2'

    def _identifyTank(self):
        while True:
            self._authenticateGoogleSpreadSheets(
            )  # link to google drive spreadsheet stored in self.controllerGS
            pi_ws = self.controllerGS.worksheet('RaspberryPi')
            headers = pi_ws.row_values(1)
            raPiID_col = headers.index('RaspberryPiID') + 1
            for i in range(5):
                try:
                    row = pi_ws.col_values(raPiID_col).index(
                        platform.node()) + 1
                    break
                except:
                    continue
            col = headers.index('TankID')
            if pi_ws.row_values(row)[col] not in ['None', '']:
                self.tankID = pi_ws.row_values(row)[col]
                for i in range(5):
                    try:
                        self._modifyPiGS(capability='Device=' + self.device +
                                         ',Camera=' + str(self.piCamera),
                                         status='AwaitingCommand')
                        return
                    except:
                        continue
                return
            else:
                self._modifyPiGS(error='Awaiting assignment of TankID')
                time.sleep(5)

    def _initError(self, message):
        try:
            self._modifyPiGS(command='None',
                             status='Stopped',
                             error='InitError: ' + message)
        except:
            pass
        self._print('InitError: ' + message)
        raise TypeError

    def _reinstructError(self, message):
        self._modifyPiGS(command='None',
                         status='AwaitingCommands',
                         error='InstructError: ' + message)

        # Update google doc to indicate error
        self.monitorCommands()

    def _print(self, text):
        try:
            print(text, file=self.lf, flush=True)
        except:
            pass
        print(text, file=sys.stderr, flush=True)

    def _returnRegColor(self, crop=True):
        # This function returns a registered color array
        if self.device == 'kinect':
            out = freenect.sync_get_video()[0]

        elif self.device == 'kinect2':
            undistorted = FN2.Frame(512, 424, 4)
            registered = FN2.Frame(512, 424, 4)
            frames = self.listener.waitForNewFrame()
            color = frames["color"]
            depth = frames["depth"]
            self.registration.apply(color,
                                    depth,
                                    undistorted,
                                    registered,
                                    enable_filter=False)
            reg_image = registered.asarray(np.uint8)[:, :, 0:3].copy()
            self.listener.release(frames)
            out = reg_image

        if crop:
            return out[self.r[1]:self.r[1] + self.r[3],
                       self.r[0]:self.r[0] + self.r[2]]
        else:
            return out

    def _returnDepth(self):
        # This function returns a float64 npy array containing one frame of data with all bad data as NaNs
        if self.device == 'kinect':
            data = freenect.sync_get_depth()[0].astype('Float64')
            data[data == 2047] = np.nan  # 2047 indicates bad data from Kinect
            return data[self.r[1]:self.r[1] + self.r[3],
                        self.r[0]:self.r[0] + self.r[2]]

        elif self.device == 'kinect2':
            frames = self.listener.waitForNewFrame(timeout=1000)
            output = frames['depth'].asarray()
            self.listener.release(frames)
            return output[self.r[1]:self.r[1] + self.r[3],
                          self.r[0]:self.r[0] + self.r[2]]

    def _returnCommand(self):
        if not self._authenticateGoogleSpreadSheets():
            raise KeyError
            # link to google drive spreadsheet stored in self.controllerGS
        while True:
            try:
                pi_ws = self.controllerGS.worksheet('RaspberryPi')
                headers = pi_ws.row_values(1)
                piIndex = pi_ws.col_values(headers.index('RaspberryPiID') +
                                           1).index(platform.node())
                command = pi_ws.col_values(headers.index('Command') +
                                           1)[piIndex]
                projectID = pi_ws.col_values(headers.index('ProjectID') +
                                             1)[piIndex]
                return command, projectID
            except gspread.exceptions.RequestError:
                continue

    def _modifyPiGS(self,
                    start=None,
                    command=None,
                    status=None,
                    IP=None,
                    capability=None,
                    error=None):
        while not self._authenticateGoogleSpreadSheets(
        ):  # link to google drive spreadsheet stored in self.controllerGS
            continue
        try:
            pi_ws = self.controllerGS.worksheet('RaspberryPi')
            headers = pi_ws.row_values(1)
            row = pi_ws.col_values(headers.index('RaspberryPiID') + 1).index(
                platform.node()) + 1
            if start is not None:
                column = headers.index('MasterStart') + 1
                pi_ws.update_cell(row, column, start)
            if command is not None:
                column = headers.index('Command') + 1
                pi_ws.update_cell(row, column, command)
            if status is not None:
                column = headers.index('Status') + 1
                pi_ws.update_cell(row, column, status)
            if error is not None:
                column = headers.index('Error') + 1
                pi_ws.update_cell(row, column, error)
            if IP is not None:
                column = headers.index('IP') + 1
                pi_ws.update_cell(row, column, IP)
            if capability is not None:
                column = headers.index('Capability') + 1
                pi_ws.update_cell(row, column, capability)
            column = headers.index('Ping') + 1
            pi_ws.update_cell(row, column, str(datetime.datetime.now()))
        except gspread.exceptions.RequestError as e:
            self._print('GoogleError: Time: ' + str(datetime.datetime.now()) +
                        ',,Error: ' + str(e))
        except TypeError:
            self._print(
                'GoogleError: Time: ' + str(datetime.datetime.now()) +
                ',,Error: Unknown. Gspread does not handle RequestErrors properly'
            )

    def _video_recording(self):
        if datetime.datetime.now().hour >= 8 and datetime.datetime.now(
        ).hour <= 18:
            return True
        else:
            return False

    def _start_kinect(self):
        if self.device == 'kinect':
            freenect.sync_get_depth()  #Grabbing a frame initializes the device
            freenect.sync_get_video()

        elif self.device == 'kinect2':
            # a: Identify pipeline to use: 1) OpenGL, 2) OpenCL, 3) CPU
            try:
                self.pipeline = FN2.OpenCLPacketPipeline()
            except:
                try:
                    self.pipeline = FN2.OpenGLPacketPipeline()
                except:
                    self.pipeline = FN2.CpuPacketPipeline()
            self._print('PacketPipeline: ' + type(self.pipeline).__name__)

            # b: Create and set logger
            self.logger = FN2.createConsoleLogger(FN2.LoggerLevel.NONE)
            FN2.setGlobalLogger(self.logger)

            # c: Identify device and create listener
            self.fn = FN2.Freenect2()
            serial = self.fn.getDeviceSerialNumber(0)
            self.K2device = self.fn.openDevice(serial, pipeline=self.pipeline)

            self.listener = FN2.SyncMultiFrameListener(FN2.FrameType.Color
                                                       | FN2.FrameType.Depth)
            # d: Register listeners
            self.K2device.setColorFrameListener(self.listener)
            self.K2device.setIrAndDepthFrameListener(self.listener)
            # e: Start device and create registration
            self.K2device.start()
            self.registration = FN2.Registration(
                self.K2device.getIrCameraParams(),
                self.K2device.getColorCameraParams())

    def _diagnose_speed(self, time=10):
        print('Diagnosing speed for ' + str(time) + ' seconds.',
              file=sys.stderr)
        delta = datetime.timedelta(seconds=time)
        start_t = datetime.datetime.now()
        counter = 0
        while True:
            depth = self._returnDepth()
            counter += 1
            if datetime.datetime.now() - start_t > delta:
                break
        #Grab single snapshot of depth and save it
        depth = self._returnDepth()
        np.save(self.projectDirectory + 'Frames/FirstFrame.npy', depth)

        #Grab a bunch of depth files to characterize the variability
        data = np.zeros(shape=(50, self.r[3], self.r[2]))
        for i in range(0, 50):
            data[i] = self._returnDepth()

        counts = np.count_nonzero(~np.isnan(data), axis=0)
        std = np.nanstd(data, axis=0)
        np.save(self.projectDirectory + 'Frames/FirstDataCount.npy', counts)
        np.save(self.projectDirectory + 'Frames/StdevCount.npy', std)

        self._print('DiagnoseSpeed: Rate: ' + str(counter / time))

        self._print(
            'FirstFrameCaptured: FirstFrame: Frames/FirstFrame.npy,,GoodDataCount: Frames/FirstDataCount.npy,,StdevCount: Frames/StdevCount.npy'
        )

    def _captureFrame(self,
                      endtime,
                      new_background=False,
                      max_frames=40,
                      stdev_threshold=25,
                      snapshots=False):
        # Captures time averaged frame of depth data

        sums = np.zeros(shape=(self.r[3], self.r[2]))
        n = np.zeros(shape=(self.r[3], self.r[2]))
        stds = np.zeros(shape=(self.r[3], self.r[2]))

        current_time = datetime.datetime.now()
        if current_time >= endtime:
            return

        counter = 1
        while True:
            all_data = np.empty(shape=(int(max_frames), self.r[3], self.r[2]))
            all_data[:] = np.nan
            for i in range(0, max_frames):
                all_data[i] = self._returnDepth()
                current_time = datetime.datetime.now()

                if snapshots:
                    self._print('SnapshotCaptured: NpyFile: Frames/Snapshot_' +
                                str(counter).zfill(6) + '.npy,,Time: ' +
                                str(current_time) + ',,GP: ' +
                                str(np.count_nonzero(~np.isnan(all_data[i]))))
                    np.save(
                        self.projectDirectory + 'Frames/Snapshot_' +
                        str(counter).zfill(6) + '.npy', all_data[i])

                counter += 1

                if current_time >= endtime:
                    break

            med = np.nanmedian(all_data, axis=0)
            med[np.isnan(med)] = 0
            std = np.nanstd(all_data, axis=0)
            med[np.isnan(std)] = 0
            med[std > stdev_threshold] = 0
            std[std > stdev_threshold] = 0
            counts = np.count_nonzero(~np.isnan(all_data), axis=0)
            med[counts < 3] = 0
            std[counts < 3] = 0

            sums += med
            stds += std

            med[med > 1] = 1
            n += med
            current_time = datetime.datetime.now()
            if current_time >= endtime:
                break

        avg_med = sums / n
        avg_std = stds / n
        color = self._returnRegColor()
        num_frames = int(max_frames * (n.max() - 1) + i + 1)

        self._print('FrameCaptured: NpyFile: Frames/Frame_' +
                    str(self.frameCounter).zfill(6) +
                    '.npy,,PicFile: Frames/Frame_' +
                    str(self.frameCounter).zfill(6) + '.jpg,,Time: ' +
                    str(endtime) + ',,NFrames: ' + str(num_frames) +
                    ',,AvgMed: ' + '%.2f' % np.nanmean(avg_med) +
                    ',,AvgStd: ' + '%.2f' % np.nanmean(avg_std) + ',,GP: ' +
                    str(np.count_nonzero(~np.isnan(avg_med))))

        np.save(
            self.projectDirectory + 'Frames/Frame_' +
            str(self.frameCounter).zfill(6) + '.npy', avg_med)
        matplotlib.image.imsave(
            self.projectDirectory + 'Frames/Frame_' +
            str(self.frameCounter).zfill(6) + '.jpg', color)
        self.frameCounter += 1
        if new_background:
            self._print(
                'BackgroundCaptured: NpyFile: Backgrounds/Background_' +
                str(self.backgroundCounter).zfill(6) +
                '.npy,,PicFile: Backgrounds/Background_' +
                str(self.backgroundCounter).zfill(6) + '.jpg,,Time: ' +
                str(endtime) + ',,NFrames: ' + str(num_frames) + ',,AvgMed: ' +
                '%.2f' % np.nanmean(avg_med) + ',,AvgStd: ' +
                '%.2f' % np.nanmean(avg_std) + ',,GP: ' +
                str(np.count_nonzero(~np.isnan(avg_med))))
            np.save(
                self.projectDirectory + 'Backgrounds/Background_' +
                str(self.backgroundCounter).zfill(6) + '.npy', avg_med)
            matplotlib.image.imsave(
                self.projectDirectory + 'Backgrounds/Background_' +
                str(self.backgroundCounter).zfill(6) + '.jpg', color)
            self.backgroundCounter += 1

        return avg_med

    def _uploadFiles(self):
        self._modifyPiGS(status='Finishing converting and uploading of videos')
        for p in self.processes:
            p.communicate()

        for movieFile in os.listdir(self.videoDirectory):
            if '.h264' in movieFile:
                command = ['python3', 'Modules/processVideo.py', movieFile]
                command += [
                    self.loggerFile, self.projectDirectory,
                    self.cloudVideoDirectory
                ]
                self._print(command)
                self.processes.append(subprocess.Popen(command))

        for p in self.processes:
            p.communicate()

        self._modifyPiGS(status='Finishing upload of frames and backgrounds')

        # Move files around as appropriate
        prepDirectory = self.projectDirectory + 'PrepFiles/'
        shutil.rmtree(prepDirectory) if os.path.exists(prepDirectory) else None
        os.makedirs(prepDirectory)

        lp = LP.LogParser(self.loggerFile)

        self.frameCounter = lp.lastFrameCounter + 1

        videoObj = [
            x for x in lp.movies if x.time.hour >= 8 and x.time.hour <= 20
        ][0]
        subprocess.call([
            'cp', self.projectDirectory + videoObj.pic_file,
            prepDirectory + 'PiCameraRGB.jpg'
        ])

        subprocess.call([
            'cp', self.projectDirectory + lp.movies[-1].pic_file,
            prepDirectory + 'LastPiCameraRGB.jpg'
        ])

        # Find depthfile that is closest to the video file time
        depthObj = [x for x in lp.frames if x.time > videoObj.time][0]

        subprocess.call([
            'cp', self.projectDirectory + depthObj.pic_file,
            prepDirectory + 'DepthRGB.jpg'
        ])

        if not os.path.isdir(self.frameDirectory):
            self._modifyPiGS(status='Error: ' + self.frameDirectory +
                             ' does not exist.')
            return

        if not os.path.isdir(self.backgroundDirectory):
            self._modifyPiGS(status='Error: ' + self.backgroundDirectory +
                             ' does not exist.')
            return

        subprocess.call([
            'cp', self.frameDirectory + 'Frame_000001.npy',
            prepDirectory + 'FirstDepth.npy'
        ])
        subprocess.call([
            'cp', self.frameDirectory + 'Frame_' +
            str(self.frameCounter - 1).zfill(6) + '.npy',
            prepDirectory + 'LastDepth.npy'
        ])
        subprocess.call([
            'tar', '-cvf', self.projectDirectory + 'Frames.tar', '-C',
            self.projectDirectory, 'Frames'
        ])
        subprocess.call([
            'tar', '-cvf', self.projectDirectory + 'Backgrounds.tar', '-C',
            self.projectDirectory, 'Backgrounds'
        ])

        #shutil.rmtree(self.frameDirectory) if os.path.exists(self.frameDirectory) else None
        #shutil.rmtree(self.backgroundDirectory) if os.path.exists(self.backgroundDirectory) else None

        #        subprocess.call(['python3', '/home/pi/Kinect2/Modules/UploadData.py', self.projectDirectory, self.projectID])
        print([
            'rclone', 'copy', self.projectDirectory,
            self.cloudMasterDirectory + self.projectID + '/'
        ])
        subprocess.call([
            'rclone', 'copy', self.projectDirectory + 'Frames.tar',
            self.cloudMasterDirectory + self.projectID + '/'
        ])
        subprocess.call([
            'rclone', 'copy', self.projectDirectory + 'Backgrounds.tar',
            self.cloudMasterDirectory + self.projectID + '/'
        ])
        subprocess.call([
            'rclone', 'copy', self.projectDirectory + 'PrepFiles/',
            self.cloudMasterDirectory + self.projectID + '/PrepFiles/'
        ])
        subprocess.call([
            'rclone', 'copy', self.projectDirectory + 'Videos/',
            self.cloudMasterDirectory + self.projectID + '/Videos/'
        ])
        subprocess.call([
            'rclone', 'copy', self.projectDirectory + 'Logfile.txt/',
            self.cloudMasterDirectory + self.projectID
        ])
        subprocess.call([
            'rclone', 'copy', self.projectDirectory + 'ProcessLog.txt/',
            self.cloudMasterDirectory + self.projectID
        ])

        try:
            self._modifyPiGS(status='Checking upload to see if it worked')
            """
            The 'rclone check' command checks for differences between the hashes of both
            source and destination files, after the files have been uploaded. If the
            check fails, the program returns non-zero exit status and the error is stored
            in CalledProcessError class of the subprocess module.
            """
            subprocess.run([
                'rclone', 'check', self.projectDirectory + 'Frames.tar',
                self.cloudMasterDirectory + self.projectID + '/'
            ],
                           check=True)
            subprocess.run([
                'rclone', 'check', self.projectDirectory + 'Backgrounds.tar',
                self.cloudMasterDirectory + self.projectID + '/'
            ],
                           check=True)
            subprocess.run([
                'rclone', 'check', self.projectDirectory + 'PrepFiles/',
                self.cloudMasterDirectory + self.projectID + '/PrepFiles/'
            ],
                           check=True)
            subprocess.run([
                'rclone', 'check', self.projectDirectory + 'Videos/',
                self.cloudMasterDirectory + self.projectID + '/Videos/'
            ],
                           check=True)
            subprocess.run([
                'rclone', 'check', self.projectDirectory + 'Logfile.txt/',
                self.cloudMasterDirectory + self.projectID
            ],
                           check=True)
            subprocess.run([
                'rclone', 'check', self.projectDirectory + 'ProcessLog.txt/',
                self.cloudMasterDirectory + self.projectID
            ],
                           check=True)

            self._modifyPiGS(status='UploadSuccessful, ready for delete')
        except subprocess.CalledProcessError:
            self._modifyPiGS(status='UploadFailed, Need to rerun')

    def _closeFiles(self):
        try:
            self._print('MasterRecordStop: ' + str(datetime.datetime.now()))
            self.lf.close()
        except AttributeError:
            pass
        try:
            if self.system == 'mac':
                self.caff.kill()
        except AttributeError:
            pass
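
A minimal sketch of how this tracker might be launched on the Pi; the __main__ guard is an assumption of this sketch, and the constructor itself blocks in monitorCommands():

if __name__ == '__main__':
    # Instantiating the tracker runs the full setup and then loops in monitorCommands()
    CichlidTracker()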
Example No. 13
    plt.title(framefile)
    plt.show()


parser = argparse.ArgumentParser()
parser.add_argument('User1',
                    type=str,
                    help='Which user annotations to compare')
parser.add_argument('User2',
                    type=str,
                    help='Which user annotations to compare')

args = parser.parse_args()

fm_obj = FM()
fm_obj.downloadAnnotationData('BoxedFish')
dt = pd.read_csv(fm_obj.localBoxedFishFile)

all_dt = pd.merge(dt[dt.User == args.User1],
                  dt[dt.User == args.User2],
                  how='inner',
                  on='Framefile')
grouped = all_dt.groupby('Framefile').max()
print('Number of frames with annotations from both: ')
print(
    pd.pivot_table(grouped,
                   values='Nfish_y',
                   columns=['ProjectID_x'],
                   aggfunc='count'))
print('Number of frames with agreements from both: ')
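
A minimal sketch of one way the agreement count could be tabulated from the merged table, assuming agreement means both users reported the same fish count (the Nfish_x vs. Nfish_y comparison is an assumption of this sketch):

agreed = grouped[grouped.Nfish_x == grouped.Nfish_y]
print(
    pd.pivot_table(agreed,
                   values='Nfish_y',
                   columns=['ProjectID_x'],
                   aggfunc='count'))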