def input_from_local(session, log, project_string_id, http_input, file, directory_id):
    """
    Save an uploaded LOCAL file into a temp dir and start media processing.

    :param session: SQLAlchemy session
    :param log: dict-style log accumulator; passed through unchanged
    :param project_string_id: string id used to look up the Project
    :param http_input: dict parsed from the request; optional keys
        'instance_list' and 'frame_packet_map'
    :param file: werkzeug-style upload object (has .filename and .stream)
    :param directory_id: target directory/dataset id
    :return: tuple (success_bool, log, input) — input is the Input record,
        with .status / .status_text set on failure
    """
    # TODO review how we want to handle header options
    # Especially if needs to be outside of function for python requests...
    # immediate_mode = request.headers['immediate_mode']
    # Issues to be careful with ie string treatment of 'True' vs True...
    immediate_mode = True

    input = Input()
    input.directory_id = directory_id

    # Use .get() so a payload without these optional keys doesn't KeyError.
    instance_list = http_input.get('instance_list')
    if instance_list:
        input.instance_list = {}
        input.instance_list['list'] = instance_list

    frame_packet_map = http_input.get('frame_packet_map')
    if frame_packet_map:
        input.frame_packet_map = frame_packet_map

    # only need to make temp dir if file doesn't already exist...
    # http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
    original_filename = secure_filename(file.filename)
    input.extension = os.path.splitext(original_filename)[1].lower()
    input.original_filename = os.path.split(original_filename)[1]
    input.temp_dir = tempfile.mkdtemp()
    # NOTE(review): original_filename already ends with the extension, so the
    # extension appears twice in this path. Kept as-is because this same
    # attribute is used for both the write below and later reads, so it is
    # self-consistent — confirm before "fixing".
    input.temp_dir_path_and_filename = input.temp_dir + \
        "/" + original_filename + input.extension

    project = Project.get(session, project_string_id)
    input.project = project

    # Reject anything whose extension doesn't map to a known media type
    # before touching the db or writing bytes to disk.
    input.media_type = Process_Media.determine_media_type(input.extension)
    if not input.media_type:
        input.status = "failed"
        input.status_text = "Invalid file type: " + input.extension
        return False, log, input

    session.add(input)
    session.flush()

    with open(input.temp_dir_path_and_filename, "wb") as f:
        f.write(file.stream.read())

    # For LOCAL not normal upload
    file_size_limit = 9 * 1024 * 1024 * 1024  # 9 GiB
    file_size = os.path.getsize(
        input.temp_dir_path_and_filename)  # gets size in bytes
    if file_size > file_size_limit:
        input.status = "failed"
        input.status_text = "Exceeded max file size"
        return False, log, input

    if immediate_mode == True or immediate_mode is None:
        # Leave this as a direct call for time being, as we pass
        # the input back to thing on front end
        process_media = Process_Media(session=session,
                                      input=input)
        result = process_media.main_entry()

        # Always return input along with file?
        if result == True:
            return True, log, input
        if result == False:
            return False, log, input

    # Default: deferred processing via the queue
    priority = 100
    item = PrioritizedItem(
        priority=priority,
        input_id=input.id,
        media_type=input.media_type)
    add_item_to_queue(item)

    return True, log, input
def add_frame_to_queue(self, frame, index: int, original_filename: str, project: Project, directory_id, video, length, video_parent_file: File, global_frame_number=None, initial_global_frame=None):
    """
    Build a local-only Input for one video frame and enqueue it for processing.

    Where frame is: a HxWxN np.array, where N=1 for mask clips and N=3
    for RGB clips. https://zulko.github.io/moviepy/ref/VideoClip/VideoClip.html

    Careful: we don't have self context here.

    Cautions
        * We purposely do NOT pass the job id, since only the original
          video should be added to the job.
        * The Input object is deliberately NOT added to the session
          (no self.session.add(input)) — committing every frame creates
          unneeded db overhead. It exists only as a local object so frames
          reuse the same concepts / format (and logging) as other media.

    :param frame: raw numpy image for this frame
    :param index: frame number within the video
    :param original_filename: video filename used to derive the frame name
    :param project: Project the frame belongs to
    :param directory_id: target directory id
    :param video: video record (provides id and root_blob_path_to_frames)
    :param length: estimated parent video length (frame_count not final yet)
    :param video_parent_file: File record of the parent video
    :param global_frame_number: optional numbering across video splits
    :param initial_global_frame: optional first global frame of this split
    """
    # Local-only object — never session.add()'d.
    # TODO use File.new() for consistency here (ie as we add new things)
    frame_input = Input()

    # Single frame naming
    frame_input.original_filename = original_filename + "_" + str(index)
    frame_input.extension = ".jpg"
    frame_input.media_type = "frame"
    frame_input.temp_dir = tempfile.mkdtemp()

    frame_input.project = project
    frame_input.project_id = self.project.id
    frame_input.directory_id = directory_id

    frame_input.parent_file_id = video_parent_file.id
    frame_input.parent_input_id = self.input.id
    frame_input.frame_packet_map = self.input.frame_packet_map

    # caution: length is an estimate — frame_count is calculated as we
    # roll through the video, so it can't be used yet.
    frame_input.video_parent_length = length

    # Temporary usage only; not database persisted. Defined here so
    # existing instances can use it without fetching the video from the
    # db each time (previously this was set on the first frame only).
    frame_input.root_blob_path_to_frames = video.root_blob_path_to_frames

    frame_input = self.get_instance_list_from_packet_map(
        input=frame_input,
        frame_number=index,
        global_frame_number=global_frame_number,
        initial_global_frame=initial_global_frame,
        from_video_split=self.input.type == 'from_video_split')

    # Priority 100 + index: process frames in order (the original genesis
    # was doing the last frame last; ordering is the more general solution
    # than flagging the last frame), and frames of videos started earlier
    # run before newly added videos.
    #
    # Maybe don't attach video_parent_file — it leads to "not bound"
    # ORM errors fairly easily.
    #
    # TODO consider sending data as a "raw" blob to cloud storage,
    # then setting "processing deferred" to True here.
    queue_item = process_media.PrioritizedItem(
        priority=100 + index,
        input=frame_input,
        raw_numpy_image=frame,
        file_is_numpy_array=True,
        video_id=video.id,
        frame_number=index,
        global_frame_number=global_frame_number,
        media_type=frame_input.media_type)
    process_media.add_item_to_queue(queue_item)