def __init__(self, projectID, workers=None):
    self.projectID = projectID
    self.workers = workers
    self.fileManager = FM()
    self.projFileManager = self.fileManager.retProjFileManager(projectID)
    self.mlFileManager = self.fileManager.retMLFileManager()
def __init__(self, projectID, workers=None):
    self.projectID = projectID
    if not self._checkProjectID():
        raise Exception(projectID + ' is not valid.')
    self.workers = workers
    self.fileManager = FM(projectID=projectID)
def __init__(self):
    # 1: Define valid commands and ignore warnings
    self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete', 'Snapshots']
    np.seterr(invalid='ignore')

    # 2: Determine which Kinect is attached (this script can handle v1 or v2 Kinects)
    self._identifyDevice()  # Stored in self.device

    # 3: Create file manager
    self.fileManager = FM()

    # 4: Download credential files
    self.fileManager.downloadData(self.fileManager.localCredentialSpreadsheet)
    self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable

    # 5: Identify credential files (credential files for uploading updates to Google Drive are found here)
    self.credentialSpreadsheet = self.masterDirectory + 'CredentialFiles/SAcredentials.json'

    # 6: Connect to Google Spreadsheets
    self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
    self._modifyPiGS(error='')

    # 7: Start PiCamera (fall back gracefully if no camera is attached)
    try:
        from picamera import PiCamera
        self.camera = PiCamera()
        self.camera.resolution = (1296, 972)
        self.camera.framerate = 30
        self.piCamera = 'True'
    except Exception:
        self.piCamera = 'False'

    # 8: Keep track of processes spawned to convert and upload video files
    self.processes = []

    # 9: Await instructions
    self.monitorCommands()
def __init__(self):
    # 1: Define valid commands and ignore warnings
    self.commands = ['New', 'Restart', 'Stop', 'Rewrite', 'UploadData', 'LocalDelete', 'Snapshots']
    np.seterr(invalid='ignore')

    # 2: Determine which Kinect is attached (this script can handle v1 or v2 Kinects)
    self._identifyDevice()  # Stored in self.device
    self.system = platform.node()

    # 3: Create file manager
    self.fileManager = FM()

    # 4: Download credential files
    self.fileManager.downloadData(self.fileManager.localCredentialSpreadsheet)
    self.fileManager.downloadData(self.fileManager.localCredentialDrive)
    self.credentialSpreadsheet = self.fileManager.localCredentialSpreadsheet  # Rename to make code readable

    # 5: Connect to Google Spreadsheets
    self._authenticateGoogleSpreadSheets()  # Creates self.controllerGS
    self._modifyPiGS(error='')

    # 6: Start PiCamera
    self.camera = PiCamera()
    self.camera.resolution = (1296, 972)
    self.camera.framerate = 30
    self.piCamera = 'True'

    # 7: Keep track of processes spawned to convert and upload video files
    self.processes = []

    # 8: Set size of frame
    self.r = (0, 0, 640, 480)

    # 9: Await instructions
    self.monitorCommands()
from Modules.FileManagers.FileManager import FileManager as FM
import subprocess, pdb

pdb.set_trace()

anFM_obj = FM().retAnFileManager()
labeledClipsDir = anFM_obj.prepareVideoAnnotation('10classLabels')
#labeledClipsDir = '/Users/pmcgrath7/Temp/CichlidAnalyzer/__AnnotatedData/LabeledVideos/10classLabels/LabeledClips/'
#pdb.set_trace()
#subprocess.run(['python3', 'Modules/MachineLearning/3D_resnet.py', '--data', labeledClipsDir])
parser.add_argument('-p', '--Practice', action='store_true', help="Use if you don't want to save annotations")
parser.add_argument('-i', '--Initials', type=str, help='Initials to save annotations')
args = parser.parse_args()

# Use the annotator's initials if provided; otherwise fall back to the hostname
if args.Initials is None:
    initials = socket.gethostname()
else:
    initials = args.Initials

fm_obj = FM(projectID=args.ProjectID)
fm_obj.createDirectory(fm_obj.localAnalysisDir)
fm_obj.downloadData(fm_obj.localManualLabelClipsDir, tarred=True)
fm_obj.downloadData(fm_obj.localLabeledClipsFile)
temp_csv = fm_obj.localAnalysisDir + 'NewAnnotations.csv'

# Read in annotations and create csv file for all annotations with the same user and projectID
dt = pd.read_csv(fm_obj.localLabeledClipsFile, index_col='LID')
new_dt = pd.DataFrame(columns=dt.columns)
clips = [x for x in os.listdir(fm_obj.localManualLabelClipsDir) if 'ManualLabel.mp4' in x]
categories = ['c', 'f', 'p', 't', 'b', 'm', 's', 'x', 'o', 'd', 'q', 'k']
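# The parser above implies a command line roughly like the following. This is an
# illustrative invocation only: the script file name, the ProjectID value, and the
# initials are hypothetical and not taken from this repository.
#
#   python3 ManuallyLabelVideos.py SomeProjectID -i PTM -p
#
# -i supplies the annotator's initials (the hostname is used if it is omitted),
# and -p is practice mode, so annotations are not saved.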
parser = argparse.ArgumentParser(description='This command allows you to manually annotate frames for a project.')
parser.add_argument('ProjectID', type=str, help='ProjectID to analyze')
parser.add_argument('-n', '--Number', type=int, help='Limit annotation to x number of frames.')
parser.add_argument('-p', '--Practice', action='store_true', help="Use if you don't want to save annotations")
args = parser.parse_args()

fileManager = FM()
projFileManager = fileManager.retProjFileManager(args.ProjectID)
projFileManager.downloadData('ObjectLabeler')
anFileManager = fileManager.retAnFileManager()

obj = ObjectLabeler(projFileManager.localManualLabelFramesDir, projFileManager.localLabeledFramesFile, args.Number, args.ProjectID)

if not args.Practice:
    # Backup annotations. Redownload to avoid race conditions
    if not os.path.exists(projFileManager.localLabeledFramesFile):
        print(projFileManager.localLabeledFramesFile + ' does not exist. Did you annotate any new frames? Quitting...')
    else:
from Modules.FileManagers.FileManager import FileManager as FM
import subprocess
import pdb

fm_obj = FM()
pdb.set_trace()

a = fm_obj.localOrganizedLabeledClipsDir
b = fm_obj.local3DVideosDir
#fm_obj.downloadAnnotationData('LabeledVideos')
#subprocess.run(['python3', 'Modules/MachineLearning/3D_resnet.py', '--data', fm_obj.localOrganizedLabeledClipsDir, '--results', fm_obj.local3DVideosDir])
def __init__(self):
    self.__version__ = '1.0.0'
    self.fileManager = FM()
    self.projectTypes = ['Prep', 'Depth', 'Cluster', 'MLCluster', 'MLObject', 'Summary']
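# A minimal sketch, not taken from this repository, of how the projectTypes list
# above might be used to validate a requested analysis stage before dispatching.
# The method name and error message are illustrative assumptions.
def _validateProjectType(self, projectType):
    # Fail fast if the caller requests an analysis stage this class does not support
    if projectType not in self.projectTypes:
        raise ValueError(projectType + ' is not one of: ' + ', '.join(self.projectTypes))
    return projectType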