def check_upload_files():
    """Looks for files in ./media; if found, uploads them to S3 and deletes them afterwards."""
    # - Initialize variables:
    reporting_program_name = 'functions.check_upload_files.py'

    # - Recover the bucket name from the settings file (second line, newline stripped):
    with open('settings.txt', 'r') as settings_file:
        bucket_name = settings_file.readlines()[1].strip()

    # - Check for files:
    files = os.listdir("./media")
    for file in files:
        # - Prepend the path to the file name:
        file = "media/" + file

        # - Upload the file to the S3 bucket:
        print(" ")
        print("file_monitor.py: Found file {}. Uploading...".format(file))
        try:
            upload_file(file_name=file, bucket=bucket_name)
            print("- uploaded!")

            # - Remove the file:
            os.remove(file)
            print("- removed!")

            # - Write to the log:
            event_message = 'Detected, uploaded and removed file ' + file
            log_event(reporting_program_name, event_message)
        except Exception as error_message:
            log_error(reporting_program_name, error_message)
            print("- failed. Check the error.log")
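
# The upload_file() helper called above is not defined in this section. Below is
# a minimal sketch of what it might look like, assuming boto3 is installed and
# AWS credentials are configured. The function name and keyword arguments
# (file_name, bucket, object_name) match the call site above, but the body is
# illustrative, not the project's actual implementation.
import logging
import os

import boto3
from botocore.exceptions import ClientError


def upload_file(file_name, bucket, object_name=None):
    """Upload file_name to the given S3 bucket; return True on success."""
    # Default the S3 key to the local file name:
    if object_name is None:
        object_name = os.path.basename(file_name)
    s3_client = boto3.client('s3')
    try:
        s3_client.upload_file(file_name, bucket, object_name)
    except ClientError as error:
        logging.error(error)
        return False
    return True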
def move_component_object(CurrentIngest):
    '''
    Put the current component object into the package object folder
    '''
    currentTargetObject = CurrentIngest.currentTargetObject
    status = 'OK'
    if currentTargetObject.topLevelObject == True:
        objectDir = CurrentIngest.packageObjectDir
        sys.argv = [
            '',
            '-i' + currentTargetObject.inputPath,
            '-d' + objectDir,
            '-L' + CurrentIngest.packageLogDir,
            '-m'
        ]
        event = 'replication'
        outcome = 'migrate object to SIP at {}'.format(objectDir)
        CurrentIngest.caller = 'rsync'
        try:
            moveNcopy.main()
            status = 'OK'
            currentTargetObject.inputPath = currentTargetObject.update_path(
                currentTargetObject.inputPath, objectDir)
        except:
            status = 'FAIL'
        loggers.log_event(CurrentIngest, event, outcome, status)
        CurrentIngest.caller = None
    # elif currentTargetObject.isDocumentation:
    else:
        pass

    if not status == 'FAIL':
        update_input_path(CurrentIngest)

    return True
def take_video(video_length=5):
    """Takes a video of length=video_length in seconds and saves it in ./media"""
    reporting_program_name = 'functions.take_video'

    # - Get the current time:
    ts = time.time()
    st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')

    # - Record the video:
    try:
        camera = PiCamera()
        camera.resolution = (1024, 768)
        camera.start_preview()
        file_name = './media/video_{}h_{}s.h264'.format(st, video_length)
        camera.start_recording(file_name)
        time.sleep(video_length)
        camera.stop_recording()
        camera.stop_preview()
        camera.close()

        # - Log event:
        event_message = 'Created video {}'.format(file_name)
        log_event(reporting_program_name, event_message)
    except Exception as error_message:
        # - Log error:
        log_error(reporting_program_name, error_message)
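
# Illustrative usage only (not part of the original module), assuming this file
# is functions.py and is run directly: record a clip, then push any pending
# clips to S3. The 10-second duration is an arbitrary example value.
if __name__ == '__main__':
    take_video(video_length=10)   # record a 10-second clip into ./media
    check_upload_files()          # upload the clip(s) to S3 and delete them locally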
def concat_access_files(CurrentIngest, wrapper):
    inputPath = CurrentIngest.accessPath
    sys.argv = [
        '',
        '-i' + inputPath,
        '-c' + CurrentIngest.InputObject.canonicalName,
        '-w' + wrapper
    ]
    success = False
    concattedAccessFile, success = concatFiles.main()
    if not success == False:
        concatObject = ingestClasses.ComponentObject(
            concattedAccessFile,
            objectCategoryDetail='access file',
            topLevelObject=False)
        CurrentIngest.InputObject.ComponentObjects.append(concatObject)
        CurrentIngest.currentTargetObject = concatObject
        concatObject.metadataDirectory = os.path.join(
            CurrentIngest.packageMetadataObjects,
            'resourcespace')
        get_file_metadata(CurrentIngest, objectCategoryDetail='access file')
        outcome = ("Component files concatenated "
            "into an access copy at {}".format(concattedAccessFile))
        status = "OK"
        # set the 'filename' to the concat file so we can log it in the db
        loggers.insert_object(
            CurrentIngest,
            objectCategory='file',
            objectCategoryDetail='concatenated access file')
    else:
        status = "FAIL"
        outcome = ("Component files could not be concatenated. "
            "Probably you need to check the file specs? "
            "Here's the output of the attempt:\n{}\n"
            "".format(concattedAccessFile))
        CurrentIngest.ingestResults['notes'] += outcome
        CurrentIngest.currentTargetObject = CurrentIngest

    CurrentIngest.caller = CurrentIngest.ProcessArguments.ffmpegVersion
    loggers.log_event(
        CurrentIngest,
        event='creation',
        outcome=outcome,
        status=status)
    CurrentIngest.caller = None
    CurrentIngest.currentTargetObject = None

    return concattedAccessFile
def check_av_status(CurrentIngest):
    '''
    Check whether or not a file is recognized as an a/v object.
    '''
    avStatus = False
    event = 'format identification'
    CurrentIngest.caller = 'pymmFunctions.is_av()'
    AV = pymmFunctions.is_av(CurrentIngest.currentTargetObject.inputPath)
    if not AV:
        outcome = "WARNING: {} is not recognized as an a/v object.".format(
            CurrentIngest.currentTargetObject.inputPath)
        status = "WARNING"
        print(outcome)
    else:
        outcome = "{} is a(n) {} object, way to go.".format(
            CurrentIngest.currentTargetObject.inputPath, AV)
        status = "OK"
        avStatus = True
    loggers.log_event(CurrentIngest, event, outcome, status)

    return avStatus, AV
def read_button_and_change_state(arm_state, state_file):
    if arm_state == 1:
        button_state = GPIO.input(13)
        if button_state == True:
            if confirm_button_press() == True:
                arm_state = 2
                # - Record UNARMED -> ARMING in the event log:
                reporting_program_name = 'rpi_home_guardian.py'
                event_message = 'Button pressed. Arming the home guardian...'
                log_event(reporting_program_name, event_message)
                # - Record the state to the file:
                save_state(state_file, arm_state)
            else:
                pass
        else:
            pass
        time.sleep(0.5)

    elif arm_state == 2:
        button_state = GPIO.input(13)
        if button_state == True:
            if confirm_button_press() == True:
                arm_state = 1
                # - Record ARMING -> UNARMED in the event log:
                reporting_program_name = 'rpi_home_guardian.py'
                event_message = 'Button pressed. Arming aborted. DISARMED.'
                log_event(reporting_program_name, event_message)
                # - Record the state to the file:
                save_state(state_file, arm_state)
            else:
                pass
        else:
            pass

    elif arm_state == 3:
        button_state = GPIO.input(13)
        if button_state == True:
            if confirm_button_press() == True:
                arm_state = 1
                # - Record ARMED -> UNARMED in the event log:
                reporting_program_name = 'rpi_home_guardian.py'
                event_message = 'Button pressed. Home guardian DISARMED'
                log_event(reporting_program_name, event_message)
                # - Record the state to the file:
                save_state(state_file, arm_state)
            else:
                pass
        else:
            pass

    return arm_state
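
# confirm_button_press() is called above but not defined in this section. Below
# is a minimal debounce sketch, assuming the button is wired to BOARD pin 13 as
# in read_button_and_change_state(); the sampling count and interval are
# illustrative choices, not the project's actual values.
def confirm_button_press(pin=13, samples=5, interval=0.1):
    """Return True only if the button reads pressed for several consecutive samples."""
    for _ in range(samples):
        if not GPIO.input(pin):
            # released during the confirmation window -> treat it as a bounce
            return False
        time.sleep(interval)
    return True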
def main():
    #########################
    #### SET INGEST ARGS ####
    args = set_args()
    inputPath = args.inputPath
    user = args.user
    objectJSON = args.metadataJSON
    databaseReporting = args.databaseReporting
    ingestType = args.ingestType
    makeProres = args.makeProres
    concatChoice = args.concatAccessFiles
    cleanupStrategy = args.cleanup_originals
    overrideOutdir = args.outdir_ingestsip
    overrideAIPdir = args.aip_staging
    overrideRS = args.resourcespace_deliver
    mono = args.mono_access_copy
    combineAudio = args.combine_audio_streams

    # init some objects
    CurrentProcess = ingestClasses.ProcessArguments(
        user,
        objectJSON,
        databaseReporting,
        ingestType,
        makeProres,
        concatChoice,
        cleanupStrategy,
        overrideOutdir,
        overrideAIPdir,
        overrideRS,
        mono,
        combineAudio)
    CurrentObject = ingestClasses.InputObject(inputPath)
    CurrentIngest = ingestClasses.Ingest(CurrentProcess, CurrentObject)

    prepped = CurrentIngest.prep_package(
        CurrentIngest.tempID,
        CurrentIngest.ProcessArguments.outdir_ingestsip)
    if not prepped:
        print(CurrentIngest.ingestResults)
        return CurrentIngest
    else:
        CurrentIngest.accessPath = os.path.join(
            CurrentIngest.packageObjectDir, 'resourcespace')

    # Run some checks on directory inputs:
    if CurrentIngest.InputObject.inputType == 'dir':
        if CurrentIngest.InputObject.structureCompliance == False:
            CurrentIngest.currentTargetObject = CurrentIngest
            pymmFunctions.cleanup_package(
                CurrentIngest,
                CurrentIngest.packageOutputDir,
                "ABORTING",
                CurrentIngest.InputObject.inputTypeDetail)
            CurrentIngest.ingestResults['abortReason'] = \
                CurrentIngest.InputObject.inputTypeDetail
            print(CurrentIngest.ingestResults)
            return CurrentIngest
        else:
            remove_system_files(CurrentIngest)
    #### END SET INGEST ARGS ####
    #############################

    ###########################
    #### LOGGING / CLEANUP ####
    # start a log file for this ingest
    CurrentIngest.create_ingestLog()

    # insert a database record for this SIP as an 'intellectual entity'
    CurrentIngest.currentTargetObject = CurrentIngest
    loggers.insert_object(
        CurrentIngest,
        objectCategory='intellectual entity',
        objectCategoryDetail='Archival Information Package')

    # tell the various logs that we are starting
    CurrentIngest.caller = 'ingestSIP.main()'
    loggers.log_event(
        CurrentIngest,
        event='ingestion start',
        outcome="SYSTEM INFO:\n{}".format(pymmFunctions.system_info()),
        status='STARTING')
    # reset variables
    CurrentIngest.caller = None

    # Log any filename outliers as a WARNING
    if CurrentIngest.InputObject.outlierComponents != []:
        CurrentIngest.currentTargetObject = CurrentIngest
        loggers.log_event(
            CurrentIngest,
            event='validation',
            outcome='test of input filenames '
                'reveals these outliers that may not belong: {}'.format(
                    ';'.join(CurrentIngest.InputObject.outlierComponents)),
            status='WARNING')

    # Send existing descriptive metadata JSON to the object metadata directory
    if CurrentIngest.ProcessArguments.objectJSON != None:
        copy = shutil.copy2(
            CurrentIngest.ProcessArguments.objectJSON,
            CurrentIngest.packageMetadataDir)
        # reset var to new path
        CurrentIngest.ProcessArguments.objectJSON = os.path.abspath(copy)
        makePbcore.add_physical_elements(
            CurrentIngest.InputObject.pbcoreXML,
            CurrentIngest.ProcessArguments.objectJSON)
    else:
        # if no descriptive metadata, just use a container pbcore.xml w/o a
        # representation of the physical/original asset;
        # this is created in the __init__ of an InputObject
        pass

    # Write the XML to file
    CurrentIngest.InputObject.pbcoreFile = makePbcore.xml_to_file(
        CurrentIngest.InputObject.pbcoreXML,
        os.path.join(
            CurrentIngest.packageMetadataDir,
            CurrentIngest.InputObject.canonicalName + "_pbcore.xml"))

    if os.path.exists(CurrentIngest.InputObject.pbcoreFile):
        status = 'OK'
    else:
        status = 'Fail'
    CurrentIngest.caller = 'pbcore.PBCoreDocument() , makePbcore.xml_to_file()'
    loggers.short_log(
        CurrentIngest,
        event='metadata extraction',
        outcome='make pbcore representation',
        status=status)
    CurrentIngest.caller = None
    CurrentIngest.currentTargetObject = None
    #### END LOGGING / CLEANUP ####
    ###############################

    ########################
    ####################
    ##   DO STUFF!    ##
    ####################
    ########################

    # insert the objects
    # for item in CurrentIngest.InputObject.ComponentObjects:
    #     print(item.inputPath)
    for _object in CurrentIngest.InputObject.ComponentObjects:
        CurrentIngest.currentTargetObject = _object
        loggers.insert_object(
            CurrentIngest,
            _object.objectCategory,
            _object.objectCategoryDetail)
        if _object.isDocumentation:
            move_component_object(CurrentIngest)
    CurrentIngest.currentTargetObject = None

    for _object in CurrentIngest.InputObject.ComponentObjects:
        CurrentIngest.currentTargetObject = _object
        if _object.objectCategoryDetail == 'film scanner output reel':
            # create a component object for each WAV and DPX
            # component of a film scan
            for item in os.scandir(_object.inputPath):
                if os.path.isfile(item.path):
                    objectCategory = 'file'
                    objectCategoryDetail = 'preservation master audio'
                elif os.path.isdir(item.path):
                    objectCategory = 'intellectual entity'
                    objectCategoryDetail = 'preservation master image sequence'
                subObject = ingestClasses.ComponentObject(
                    item.path,
                    objectCategory=objectCategory,
                    objectCategoryDetail=objectCategoryDetail,
                    topLevelObject=False)
                CurrentIngest.InputObject.ComponentObjects.append(subObject)
                CurrentIngest.currentTargetObject = subObject
                loggers.insert_object(
                    CurrentIngest,
                    subObject.objectCategory,
                    subObject.objectCategoryDetail)
            # reset the currentTargetObject
            CurrentIngest.currentTargetObject = _object
        move_component_object(CurrentIngest)
    CurrentIngest.currentTargetObject = None

    for _object in CurrentIngest.InputObject.ComponentObjects:
        CurrentIngest.currentTargetObject = _object
        if not _object.isDocumentation:
            if not _object.objectCategoryDetail == 'film scanner output reel':
                # we log metadata for the scanner output
                # components individually
                if _object.objectCategoryDetail == 'access file':
                    level = 'Access file'
                    _object.metadataDirectory = os.path.join(
                        CurrentIngest.packageMetadataObjects,
                        'resourcespace')
                else:
                    level = 'Preservation master'
                    _object.metadataDirectory = \
                        CurrentIngest.packageMetadataObjects
                add_pbcore_instantiation(CurrentIngest, level)
                get_file_metadata(CurrentIngest, _object.objectCategoryDetail)
                # note:
                # this logs status = OK whether or not it is actually ok. @fixme
                loggers.pymm_log(
                    CurrentIngest,
                    event='metadata extraction',
                    outcome='calculate input file technical metadata',
                    status='OK')
            else:
                pass
            if _object.topLevelObject == True:
                # but the access file is made by calling the scanner
                # output as a whole
                isSequence, rsPackage = None, None
                if _object.objectCategoryDetail == 'film scanner output reel':
                    isSequence = True
                if 'multi' in CurrentIngest.InputObject.inputTypeDetail:
                    rsPackage = True
                if CurrentIngest.includesSubmissionDocumentation:
                    rsPackage = True
                _object.accessPath = make_derivs(
                    CurrentIngest,
                    rsPackage=rsPackage,
                    isSequence=isSequence)
                check_av_status(CurrentIngest)  ## IS THIS REDUNDANT? IT AT LEAST LOGS THE CHECK... @fixme
        else:
            pass
    CurrentIngest.currentTargetObject = None

    if CurrentIngest.ProcessArguments.concatChoice == True:
        av = [
            x for x in CurrentIngest.InputObject.ComponentObjects
            if x.objectCategoryDetail == 'access file'
        ]
        avType = av[0].avStatus
        if avType == 'AUDIO':
            wrapper = 'mp3'
        elif avType == "VIDEO":
            wrapper = 'mp4'
        concatPath = concat_access_files(CurrentIngest, wrapper)
        if os.path.exists(concatPath):
            deliver_concat_access(CurrentIngest, concatPath)

    if CurrentIngest.includesSubmissionDocumentation:
        deliver_documentation(CurrentIngest)
    ### END ACTUAL STUFF DOING ###
    ##############################

    ###############################
    ## FLUFF THE SIP FOR STAGING ##
    # set the logging 'filename' to the UUID for the rest of the process
    CurrentIngest.currentTargetObject = CurrentIngest

    # rename SIP from temp to UUID; _SIP is the staged UUID path
    _SIP = rename_SIP(CurrentIngest)
    # put the package into a UUID parent folder
    envelop_SIP(CurrentIngest)

    # make a hashdeep manifest for the objects directory
    objectManifestPath = makeMetadata.make_hashdeep_manifest(
        CurrentIngest.packageObjectDir,
        CurrentIngest.ingestUUID,
        'objects')
    CurrentIngest.objectManifestPath = objectManifestPath
    outcome = ('create hashdeep manifest '
        'for objects directory at {}'.format(objectManifestPath))
    status = 'OK'
    CurrentIngest.caller = 'hashdeep'
    if not os.path.isfile(objectManifestPath):
        status = 'FAIL'
        outcome = 'could not ' + outcome
    else:
        pass
    eventID = loggers.short_log(
        CurrentIngest,
        event='message digest calculation',
        outcome=outcome,
        status=status)
    # report data from the object manifest
    report_SIP_fixity(CurrentIngest, eventID)
    CurrentIngest.caller = None

    # also add md5 and filename for each object as identifiers
    # to the pbcore record
    add_pbcore_md5_location(CurrentIngest)

    # report characteristics of the SIP's objects
    if CurrentIngest.ProcessArguments.databaseReporting == True:
        loggers.insert_obj_chars(CurrentIngest)

    #####
    # AT THIS POINT THE SIP IS FULLY FORMED SO LOG IT AS SUCH
    CurrentIngest.currentTargetObject = CurrentIngest
    CurrentIngest.caller = 'ingestSIP.main()'
    loggers.short_log(
        CurrentIngest,
        event='information package creation',
        outcome='assemble SIP into valid structure',
        status='OK')
    CurrentIngest.caller = None

    # recursively set SIP and manifest to 777 file permission
    chmodded = pymmFunctions.recursive_chmod(_SIP)
    CurrentIngest.caller = 'Python3 os.chmod(x,0o777)'
    if chmodded:
        status = 'OK'
    else:
        status = 'FAIL'
    loggers.short_log(
        CurrentIngest,
        event='modification',
        outcome='recursively set SIP and manifest file permissions to 777',
        status=status)
    CurrentIngest.caller = None

    packageVerified = False
    # move the SIP if needed
    if not CurrentIngest.ProcessArguments.aip_staging == \
            CurrentIngest.ProcessArguments.outdir_ingestsip:
        _SIP = stage_sip(CurrentIngest)

        # audit the hashdeep manifest
        objectsVerified = False
        auditOutcome, objectsVerified = makeMetadata.hashdeep_audit(
            _SIP,
            objectManifestPath,
            _type='objects')
        CurrentIngest.caller = 'hashdeep'
        if objectsVerified == True:
            status = 'OK'
            outcome = (
                'Objects directory verified against hashdeep manifest: \n{}'
                ''.format(auditOutcome))
        else:
            status = "WARNING"
            outcome = ("Objects directory failed hashdeep audit. "
                "Some files may be missing or damaged! "
                "Check the hashdeep audit: \n{}".format(auditOutcome))
        loggers.log_event(
            CurrentIngest,
            event='fixity check',
            outcome=outcome,
            status=status)
        CurrentIngest.caller = None
    else:
        objectsVerified = True

    validSIP, validationOutcome = pymmFunctions.validate_SIP_structure(_SIP)
    if not validSIP:
        # IS QUITTING HERE EXCESSIVE?? MAYBE JUST LOG
        # THAT IT DIDN'T PASS MUSTER BUT SAVE IT.
        # OR MAKE AN "INCONCLUSIVE/WARNING" VERSION?
        # NOTE: the failure gets logged in the system log,
        # along with listing reasons for failure;
        # then the abort message is logged in cleanup_package()
        pymmFunctions.cleanup_package(
            CurrentIngest,
            _SIP,
            "ABORTING",
            validationOutcome)
        CurrentIngest.ingestResults['abortReason'] = validationOutcome
        print(CurrentIngest.ingestResults)
        return CurrentIngest
    else:
        CurrentIngest.caller = 'pymmFunctions.validate_SIP_structure()'
        loggers.log_event(
            CurrentIngest,
            event="validation",
            outcome="SIP validated against expected structure",
            status="OK")
        CurrentIngest.caller = None

    if objectsVerified and validSIP:
        CurrentIngest.ingestResults['status'] = True
    CurrentIngest.ingestResults['ingestUUID'] = CurrentIngest.ingestUUID
    # ingestResults.accessPath should be changed to "accessDelivery" once EDITH
    # ingest-refactor is merged to master
    # @fixme
    CurrentIngest.ingestResults['accessPath'] = CurrentIngest.accessDelivery

    # THIS IS THE LAST CALL MADE TO MODIFY ANYTHING IN THE SIP.
    loggers.ingest_log(
        CurrentIngest,
        event='ingestion end',
        outcome='Submission Information Package verified and staged',
        status='ENDING')

    # make a hashdeep manifest
    manifestPath = makeMetadata.make_hashdeep_manifest(
        _SIP,
        CurrentIngest.ingestUUID,
        'hashdeep')
    CurrentIngest.caller = 'hashdeep'
    if os.path.isfile(manifestPath):
        loggers.end_log(
            CurrentIngest,
            event='message digest calculation',
            outcome='create hashdeep manifest '
                'for SIP at {}'.format(manifestPath),
            status='OK')

    do_cleanup(CurrentIngest, objectsVerified, 'done')

    CurrentIngest.caller = 'ingestSIP.main()'
    loggers.end_log(
        CurrentIngest,
        event='ingestion end',
        outcome='Submission Information Package verified and staged',
        status='ENDING')

    if CurrentIngest.ingestResults['status'] == True:
        print("####\nEVERYTHING WENT GREAT! "
            "THE SIP IS GOOD TO GO! @{}\n####".format(_SIP))
    else:
        print("####\nSOMETHING DID NOT GO AS PLANNED. "
            "CHECK THE LOG FOR MORE DETAILS!\n####")

    print(CurrentIngest.ingestResults)

    return CurrentIngest
def make_derivs(CurrentIngest, rsPackage=None, isSequence=None):
    '''
    Make derivatives based on options declared in config...
    '''
    initialObject = CurrentIngest.currentTargetObject
    inputPath = initialObject.inputPath
    packageObjectDir = CurrentIngest.packageObjectDir
    packageLogDir = CurrentIngest.packageLogDir
    packageMetadataObjects = CurrentIngest.packageMetadataObjects
    makeProres = CurrentIngest.ProcessArguments.makeProres
    ingestType = CurrentIngest.ProcessArguments.ingestType
    resourcespace_deliver = CurrentIngest.ProcessArguments.resourcespace_deliver
    mono = CurrentIngest.ProcessArguments.mono
    combineAudio = CurrentIngest.ProcessArguments.combineAudio

    # make an enclosing folder for access copies if the input is a
    # group of related video files
    if rsPackage != None:
        rsPackageDelivery = make_rs_package(
            CurrentIngest.InputObject.canonicalName,
            resourcespace_deliver)
        CurrentIngest.accessDelivery = rsPackageDelivery
    else:
        rsPackageDelivery = None

    # we'll always output a resourcespace access file for video ingests,
    # so init the derivTypes list with `resourcespace`
    if ingestType in ('film scan', 'video transfer', 'audio xfer'):
        derivTypes = ['resourcespace']

    # deliveredDerivPaths is a dict as follows:
    # {derivtype1: /path/to/deriv/file1}
    deliveredDerivPaths = {}

    if pymmFunctions.boolean_answer(
        config['deriv delivery options']['proresHQ']):
        derivTypes.append('proresHQ')
    elif makeProres == True:
        derivTypes.append('proresHQ')
    else:
        pass

    for derivType in derivTypes:
        sysargs = [
            '',
            '-i' + inputPath,
            '-o' + packageObjectDir,
            '-d' + derivType,
            '-L' + packageLogDir
        ]
        if rsPackageDelivery != None:
            sysargs.append('-r' + rsPackageDelivery)
        if isSequence:
            sysargs.append('-s')
        if mono:
            # select to mix down audio to mono
            sysargs.append('-m')
        if combineAudio:
            # select to mix all audio tracks to one stereo track
            sysargs.append('-k')
        sys.argv = sysargs

        deliveredDeriv = makeDerivs.main(CurrentIngest)
        deliveredDerivPaths[derivType] = deliveredDeriv

        event = 'migration'
        CurrentIngest.caller = CurrentIngest.ProcessArguments.ffmpegVersion
        if pymmFunctions.is_av(deliveredDeriv):
            outcome = 'create access copy at {}'.format(deliveredDeriv)
            status = 'OK'
        else:
            outcome = 'could not create access copy'
            status = 'FAIL'
        loggers.log_event(CurrentIngest, event, outcome, status)
        CurrentIngest.caller = None

    for key, value in deliveredDerivPaths.items():
        # metadata for each deriv is stored in a folder named
        # for the derivtype under the main Metadata folder
        mdDest = os.path.join(packageMetadataObjects, key)
        if not os.path.isdir(mdDest):
            os.mkdir(mdDest)
        if os.path.isfile(value):
            # if the new access file exists,
            # create it as a ComponentObject and
            # add it to the list in InputObject
            newObject = ingestClasses.ComponentObject(
                value,
                objectCategoryDetail='access file',
                topLevelObject=False)
            newObject.metadataDirectory = mdDest
            CurrentIngest.InputObject.ComponentObjects.append(newObject)
            CurrentIngest.currentTargetObject = newObject
            loggers.insert_object(
                CurrentIngest,
                objectCategory='file',
                objectCategoryDetail='access file')
        else:
            pass

    # get a return value that is the path to the access copy(ies) delivered
    # to a destination defined in config.ini
    # * for a single file it's the single deriv path
    # * for a folder of files it's the path to the enclosing deriv folder
    #
    # this path is used to make an API call to resourcespace
    if rsPackageDelivery not in ('', None):
        accessPath = rsPackageDelivery
    else:
        SIPaccessPath = deliveredDerivPaths['resourcespace']
        deliveredAccessBase = os.path.basename(SIPaccessPath)
        accessPath = os.path.join(resourcespace_deliver, deliveredAccessBase)
    CurrentIngest.ingestResults['accessPath'] = accessPath
    CurrentIngest.accessDelivery = accessPath

    return accessPath
import functions
import time
import os
from loggers import log_error, log_event
import RPi.GPIO as GPIO

GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)
GPIO.setup(15, GPIO.OUT)  # LED

# - Log starting event:
reporting_program_name = 'rpi_home_guardian.py'
event_message = 'Home guardian booting up'
log_event(reporting_program_name, event_message)

# - Read initial state (1=unarmed, 2=arming, 3=armed):
arm_state = 3  # Defaults to ARMED
state_file = 'arm_state.txt'
if not os.path.exists(state_file):
    print('- No arm_state file, creating...')
    functions.save_state(state_file, arm_state)
    print("-- Done!")
else:
    try:
        arm_state = functions.read_state(state_file)
    except Exception as error_message:
        log_error(reporting_program_name, error_message)
        print("- Failed to load the arm_state. Removing the file...")
        os.remove(state_file)
        arm_state = 3
        event_message = 'Error handled. Removed the corrupted arm_state file. State set to ARMED'
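
# The functions.save_state() and functions.read_state() helpers used above are
# not shown in this section. Below is a minimal sketch, assuming the state is
# stored as a single integer in a plain text file; the real helpers in
# functions.py may differ.
def save_state(state_file, arm_state):
    """Write the current arm_state (1, 2, or 3) to state_file."""
    with open(state_file, 'w') as f:
        f.write(str(arm_state))


def read_state(state_file):
    """Read the arm_state back from state_file as an integer."""
    with open(state_file, 'r') as f:
        return int(f.read().strip())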