def status_update(n, current_time, label):
    '''Refresh the UI with the current labelling configuration.

    INPUTS:
        n            : (unused) trigger count from the interval callback
        current_time : Current time in seconds on the video
        label        : Label that will be applied to all frames between
                       frame_start and frame_end
    OUTPUTS:
        List of html.P elements: a summary of the pending label followed
        by every key/value pair in the configuration file.
    '''
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    fps = cfg["current_framerate"]
    total_videos = cfg["num_vids"]
    # A paused/uninitialised player reports a falsy time; treat it as t=0.
    current_time = current_time or 0
    cfg["frame_end"] = int(round(current_time * fps))
    props.set(cfg)
    summary = [
        html.P("Press ADD LABEL to add the following to the DataBase: \n"),
        html.P("frame_start : {} ( {} seconds)".format(
            cfg["frame_start"], cfg["frame_start"] / fps)),
        html.P("frame_end : {} ( {} seconds)".format(
            cfg["frame_end"], cfg["frame_end"] / fps)),
        html.P("label : {}".format(label)),
        html.P("Video {} of {}".format(cfg["current_video_pos"] + 1,
                                       total_videos)),
    ]
    settings = [html.P("{} : {} \n".format(key, val)) for key, val in cfg.items()]
    return summary + settings
def load_all():
    '''Initialise the video index, frame-label index and label database.

    Perform the following initial steps:
      - Find and update csv file containing urls of all videos in
        args.STATIC - create it if it does not exist
      - Find csv file for storing frame labels - create it if it does
        not exist
    '''
    # Build the index of videos available to the tool.
    gi.generate_index(STATIC_SHORTCUT_LOC, VIDEO_URLS_FILE_LOC, COLNAME)
    # Build the per-frame label index from every video in that index.
    flu.generate_frame_labels(VIDEO_URLS_FILE_LOC,
                              COLNAME,
                              FRAME_URLS_FILE_LOC,
                              PAD=7)
    # Mirror the frame index into the SQLite database in chunks so a very
    # large csv never has to fit in memory at once.
    reader = pd.read_csv(FRAME_URLS_FILE_LOC, chunksize=1024**2)
    for frame_chunk in reader:
        frame_chunk.to_sql(name=table_name,
                           con=connex,
                           if_exists="append",
                           index=False)
    connex.commit()
    # Record the file locations (and the video we were last on) in the
    # persistent configuration.
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    cfg["video_urls_file_loc"] = VIDEO_URLS_FILE_LOC
    cfg["frame_urls_file_loc"] = FRAME_URLS_FILE_LOC
    cfg["frame_db_loc"] = FRAMES_DB
    cfg["last_video_url"] = cfg["current_video_url"]
    props.set(cfg)
def write_label(n, current_time, current_label):
    '''Write current_label to the database for frames between frame_start
    and the frame at current_time, then advance frame_start.

    INPUTS:
        n             : (unused) click count that triggered the callback
        current_time  : Current time in seconds on the video
        current_label : Label applied to frames frame_start..frame_end
    OUTPUTS:
        List of html.P elements confirming what was written.
    '''
    config_file = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    config = config_file.get()
    framerate = config["current_framerate"]
    frame_start = config["frame_start"]
    current_video_url = config["current_video_url"]
    label_author = config["author"]
    frame_end = int(round(current_time * framerate))
    label_dic = {
        VIDEO_URL_COLNAME: current_video_url,
        FRAME_START_COLNAME: frame_start,
        FRAME_END_COLNAME: frame_end,
        LABEL_COLNAME: current_label,
        AUTHOR_COLNAME: label_author,
        TIMESTAMP_COLNAME: time.time()
    }
    # IDIOM FIX: list(dict) / list(dict.values()) instead of rebuilding
    # the keys and values with identity comprehensions.
    squ.insert_label(connex, table_name, list(label_dic),
                     list(label_dic.values()))
    # reset the frame_start to current frame
    config["frame_start"] = frame_end
    config_file.set(config)
    return [
        html.P("Frames {} to {} Written to DataBase as {}!".format(
            frame_start, frame_end, current_label)),
        html.P("for video {}".format(current_video_url))
    ]
def update_output(value):
    '''Persist the selected scene label in the config and echo it back.'''
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    # write the new value to the config
    cfg["current_scene_label"] = value
    props.set(cfg)
    # Display the current selected step
    return 'This scene will be labelled: "{}"'.format(value)
def update_footage(value):
    '''Persist the step size used by the NEXT VIDEO button and echo it.'''
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    # write the new value to the config
    cfg["next_footage_step"] = value
    props.set(cfg)
    # Display the current selected step
    return 'The selection will move {} steps when "NEXT VIDEO" pressed.'.format(value)
def load_all():
    '''Perform the following initial steps:
      - Find and update csv file containing urls of all videos in
        args.STATIC - create it if it does not exist
      - Find csv file for storing frame labels - create it if it does
        not exist
    '''
    # regenerate the index of videos to be used
    gi.generate_index(STATIC_SHORTCUT_LOC, VIDEO_URLS_FILE_LOC, COLNAME)
    # remember where the index lives in the persistent configuration
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    cfg["video_urls_file_loc"] = VIDEO_URLS_FILE_LOC
    # find the stored frames file
    props.set(cfg)
def update_label(current_label, current_time):
    '''Record the newly selected label in the configuration.

    This function is called when the label choice changes. To simplify
    operation, we will only update labels when the video is playing
    forwards ie.
      - if frame_start < current_frame and last_video_url = current_video_url:
            update the label for all frames
            frame_start <= frame < current_frame to last_label
      - else: Do NOT write to database!
    '''
    print(current_time)  # debug trace of the player position
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    cfg["last_label"] = current_label
    props.set(cfg)
    return 'This scene will be labelled: "{}"'.format(current_label)
def next_footage(footage, current_time):
    '''Advance the player to the next video in the index.

    INPUTS:
        footage      : Dummy variable for number of times "NEXT VIDEO"
                       Button is triggered
        current_time : Current time in seconds on the video (Unused!)
    OUTPUTS:
        url : Url of the next video, relative to the static folder
    '''
    # Find desired footage and update player video
    config_file = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    config = config_file.get()
    current_pos = config["current_video_pos"]
    next_footage_step = config["next_footage_step"]
    # The *current* video becomes the "last" video once we step on.
    # (BUGFIX: removed a dead read of config["last_video_url"] that was
    # immediately overwritten by this assignment.)
    last_video_url = config["current_video_url"]
    num_vids = config["num_vids"]
    url_df = pd.read_csv(config["video_urls_file_loc"])
    # wrap around the playlist
    new_pos = (current_pos + next_footage_step) % (num_vids)
    # find the video corresponding to new_pos
    full_url = url_df.at[new_pos, COLNAME]
    # must change so that it only refers to the static folder (limitation of Dash)
    url = full_url.replace(str(app_file_parent_path), '')
    # probe the video for its framerate; check we can read a frame
    video = cv2.VideoCapture(full_url)
    res, image = video.read()
    if not res:
        print("Video unable to read video \n{}".format(full_url))
    FRAMERATE = int(video.get(cv2.CAP_PROP_FPS))
    # BUGFIX: release the capture handle — it was previously leaked.
    video.release()
    # update config
    config["current_video_pos"] = new_pos
    config["current_framerate"] = FRAMERATE
    config["current_video_url"] = full_url
    # We are at the start of the video so reset the labelling state:
    # set frame_start, last_label, last_video_url now
    config["frame_start"] = 0
    config["last_label"] = labels[0]
    config["last_video_url"] = last_video_url
    config_file.set(config)
    # return new url
    return url
def next_footage(footage):
    '''Advance the player by next_footage_step entries in url_list and
    return the new url.'''
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    position = cfg["current_video_pos"]
    step = cfg["next_footage_step"]
    # wrap around the playlist
    position = (position + step) % len(url_list)
    url = url_list[position]
    # persist the new position
    cfg["current_video_pos"] = position
    props.set(cfg)
    # warn (but still return) if the file is missing on disk
    if not os.path.exists(url):
        print("Cannot find new video!")
    return url
def next_footage(footage):
    '''Advance by next_footage_step entries in the video index and return
    the new url, rewritten relative to the static folder (a limitation of
    Dash's static file serving).'''
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    index = pd.read_csv(cfg["video_urls_file_loc"])
    step = cfg["next_footage_step"]
    # wrap around the index
    new_pos = (cfg["current_video_pos"] + step) % len(index)
    # this url needs to be changed to its location in STATIC
    url = index.at[new_pos, COLNAME].replace(str(app_file_parent_path), '')
    # persist the new position
    cfg["current_video_pos"] = new_pos
    props.set(cfg)
    return url
def next_footage(footage, current_time):
    '''Advance the player to the next video and roll the last_* tracking
    state forward.

    INPUTS:
        footage      : Dummy trigger variable from the "NEXT VIDEO" button
        current_time : Current time in seconds on the video
    OUTPUTS:
        url : Url of the next video, relative to the static folder
    '''
    # Find desired footage and update player video
    config_file = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    config = config_file.get()
    current_pos = config["current_video_pos"]
    next_footage_step = config["next_footage_step"]
    url_df = pd.read_csv(config["video_urls_file_loc"])
    new_pos = (current_pos + next_footage_step) % (len(url_df))
    # find the video corresponding to new_pos
    full_url = url_df.at[new_pos, COLNAME]
    # must change so that it only refers to the static folder (limitation of Dash)
    url = full_url.replace(str(app_file_parent_path), '')
    # get framerate
    video = cv2.VideoCapture(full_url)
    FRAMERATE = int(video.get(cv2.CAP_PROP_FPS))
    # BUGFIX: release the capture handle — it was previously leaked.
    video.release()
    # update config
    config["current_video_pos"] = new_pos
    config["current_framerate"] = FRAMERATE
    config["current_video_url"] = full_url
    last_frame = config["last_frame"]
    last_label = config["last_label"]
    last_video_url = config["last_video_url"]
    current_video_url = config["current_video_url"]
    framerate = config["current_framerate"]
    current_label = last_label  # Did not change the label!
    # get the current frame
    if current_time:
        current_frame = int(round(current_time * framerate))
        if last_frame <= current_frame:
            # update the sql fields (currently only logged, not written)
            print("Updating Video: {} \nFrames: {} to {}\nLabel: {}".format(
                current_video_url, last_frame, current_frame, last_label))
    else:
        current_frame = 0
        # We are at the start of the video so do nothing
    # set last_frame, last_label, last_video_url now
    config["last_frame"] = current_frame
    config["last_label"] = current_label
    config["last_video_url"] = current_video_url
    config_file.set(config)
    # return new url
    return url
def update_label(current_label, current_time):
    '''Called when the label choice changes.

    To simplify operation, we will only update labels when the video is
    playing forwards ie.
      - if last_frame < current_frame and last_video_url = current_video_url:
            update the label for all frames
            last_frame <= frame < current_frame to last_label
      - else: Do NOT write to database!
    '''
    print(current_time)
    props = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties)
    cfg = props.get()
    last_frame = cfg["last_frame"]
    last_label = cfg["last_label"]
    last_video_url = cfg["last_video_url"]
    current_video_url = cfg["current_video_url"]
    framerate = cfg["current_framerate"]
    if not current_time:
        # We are at the start of the video so do nothing
        current_frame = 0
    else:
        current_frame = int(round(current_time * framerate))
        forwards = last_frame <= current_frame
        same_video = current_video_url == last_video_url
        label_changed = current_label != last_label
        if forwards and same_video and label_changed:
            # update the sql fields
            print("Updating Video: {} \nFrames: {} to {}\nLabel: {}".format(
                current_video_url, last_frame, current_frame, last_label))
    # set last_frame, last_label, last_video_url now
    cfg["last_frame"] = current_frame
    cfg["last_label"] = current_label
    cfg["last_video_url"] = current_video_url
    props.set(cfg)
    return 'This scene will be labelled: "{}"'.format(current_label)
# generate configuration file os.mkdir(CONFIG_LOC) print("Directory {} created.".format(CONFIG_LOC)) print("{} did not exist. \nIt will now be created.".format( CONFIG_LOC, CONFIG_FILE_LOC)) except: print("Directory {} already exists.".format(CONFIG_LOC)) try: file = open(CONFIG_FILE_LOC, 'r') print("{} already exists.".format(CONFIG_FILE_LOC)) except IOError: print( "{} did not exist but the {} directory did. \nIt will now be created." .format(CONFIG_LOC, CONFIG_FILE_LOC)) config_file = JSONPropertiesFile(CONFIG_FILE_LOC, default_properties) config = config_file.get() print("Config read successful.") # Get CSS stylesheets external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css'] app = dash.Dash(__name__, external_stylesheets=external_stylesheets) ### TEMPORARY FIXES ### # attempt to use an array of urls url_list = ["static/output.mp4", "static/1_CPU.mp4", "static/2_CPU.mp4"] ### \TEMPORARY FIXES ### app.layout = html.Div(children=[
# custom scripts
from common.data.labels.app.config_utils import JSONPropertiesFile

# Resolve the configuration directory/file relative to this script.
app_file_parent_path = Path(__file__).absolute().parent
CONFIG_LOC = os.path.join(app_file_parent_path, "config")
CONFIG_FILE_LOC = os.path.join(CONFIG_LOC, "config.json")

# Ensure the configuration file exists
try:
    # generate configuration file
    os.mkdir(CONFIG_LOC)
    print("Directory {} created.".format(CONFIG_LOC))
    print("{} did not exist. \nIt will now be created.".format(
        CONFIG_LOC, CONFIG_FILE_LOC))
except FileExistsError:  # BUGFIX: was a bare `except:` that hid real errors
    print("Directory {} already exists.".format(CONFIG_LOC))
    try:
        # BUGFIX: the open() handle was previously leaked; `with` closes it.
        with open(CONFIG_FILE_LOC, 'r'):
            print("{} already exists.".format(CONFIG_FILE_LOC))
    except IOError:
        # BUGFIX: format arguments were swapped (file first, then directory).
        print(
            "{} did not exist but the {} directory did. \nIt will now be created."
            .format(CONFIG_FILE_LOC, CONFIG_LOC))

file_path = CONFIG_FILE_LOC
default_properties = {}
config_file = JSONPropertiesFile(file_path, default_properties)
config = config_file.get()
print(config)
# NOTE(review): "k else" looks like a truncated/placeholder value — confirm
# the intended PROD setting before relying on it.
config["PROD"] = "k else"
config_file.set(config)  # save new config
def inference_demo(v, o, m):
    '''Run frame-by-frame inference over a video file and save the results.

    INPUTS:
        v : path of the input video (read with cv2.VideoCapture and fed to
            ffmpeg for audio extraction)
        o : path of the csv file the per-frame results are written to
        m : list of model configuration file paths, one per classifier
    OUTPUTS:
        DataFrame with one probability column per model followed by the
        feature values that were supplied to the models.
    '''
    ## initialise camera
    cap = cv2.VideoCapture(v)
    vid_FPS = cap.get(cv2.CAP_PROP_FPS)
    # perform camera test
    ret, frame = cap.read()
    print("The camera was initialised: {}".format(ret))

    # initialise audio track from mp4 file
    temp_fn = "audio.wav"  # temporary filename
    # make sure this filename doesn't exist
    while os.path.isfile(temp_fn):
        # delete temporary file
        # file deletions appear to fail sometimes, so loop until it is gone
        os.remove(temp_fn)
        time.sleep(0.01)
    command = [
        "ffmpeg", "-i", v, "-ab", "160k", "-ac", "2", "-ar", "44100", "-vn",
        temp_fn
    ]
    subprocess.call(command)

    # initialise models
    cfgs = []  # configuration files
    clfs = []  # classifier pipelines
    prev = []  # number of previous frames required
    nms = []   # names
    for i in range(len(m)):
        # load the configuration file
        config_file = JSONPropertiesFile(m[i])
        config = config_file.get()
        cfgs.append(config)
        clfs.append(joblib.load(config["model_store"]))  # load the classifiers
        prev.append(config["n_prev"])  # number of previous frames required
        nms.append(config["name"])
    print("Models loaded")

    # all share the same dataset config, s
    s = importlib.import_module(cfgs[0]["m_loc"])  # loads the model location
    headers = s.const_header()  # feature headers

    # read the audio file
    wf = wave.open(temp_fn, 'rb')
    p = pyaudio.PyAudio()
    # define hardware specific parameters
    RATE = wf.getframerate()
    # how many samples to listen for each time prediction attempted
    CHUNK = int(np.floor(RATE * s.const_trail()))
    # open stream based on the wave object which has been input.
    stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                    channels=wf.getnchannels(),
                    rate=wf.getframerate(),
                    output=True)

    # describe the models to be used (requires s to be loaded in first)
    for i in range(len(m)):
        print(
            "\n\nMODEL {}: \n {} \nFeatures: \n{}\nMean: \n{}\nVariance: \n{}".
            format(nms[i], "-*-" * 10,
                   dict(zip(headers[3:], cfgs[i]["sel_headers"][3:])),
                   clfs[i].named_steps[cfgs[i]["scaler"]].mean_,
                   clfs[i].named_steps[cfgs[i]["scaler"]].var_))

    # loop until keyboard exception or video complete
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    offset = int(
        np.ceil(s.const_trail() * vid_FPS) +
        1 * round(vid_FPS))  # audio may not be read correctly in last second
    # store predicted probability and the features that were supplied to model
    res = np.zeros((n_frames - offset, len(headers[3:]) + len(m)))
    start = time.time()
    start_frame = 0  # frame to start inference from
    with progressbar.ProgressBar(max_value=n_frames - offset -
                                 start_frame) as bar:
        cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
        for i in range(n_frames - offset - start_frame):
            try:  # run the loop
                # gather camera frame
                ret, frame = cap.read()
                # BUGFIX: check the read succeeded *before* touching the
                # frame; previously a failed read crashed inside cv2.resize
                # because `frame` was None.
                if not ret:
                    print("Failed to obtain camera data")
                    break
                # put frame into correct format
                frame = cv2.resize(frame, (224, 224),
                                   interpolation=cv2.INTER_NEAREST)
                # do PIL conversion to numpy using the Keras preprocessing
                # functions
                frame = img_to_array(frame)
                frame = np.expand_dims(frame, axis=0)
                # preprocess_input for image_net
                frame = preprocess_input(frame)

                # gather audio data
                audio = audio_numpy(wf, start_frame + i, vid_FPS, CHUNK, RATE)

                # begin storing historical data
                if i == 0:
                    # no previous data, instead update with new
                    image_batch = np.repeat(frame, max(prev) + 1, axis=0)
                    audio_batch = np.repeat(audio, max(prev) + 1, axis=0)
                else:
                    # be careful with the order here! Take special care to
                    # ensure this matches the order of frames that
                    # preprocess_input expects
                    image_batch[:-1] = image_batch[1:]  # update all prev, drop last
                    audio_batch[:-1] = audio_batch[1:]
                    image_batch[-1] = frame  # last entry becomes new frame
                    audio_batch[-1] = audio

                # obtain the dataset features
                feats = s.preprocess_input(image_batch,
                                           audio_batch,
                                           inference=True,
                                           RATE=RATE)

                # obtain classifications from each model
                # BUGFIX: renamed from `p`, which shadowed the PyAudio handle
                # and made p.terminate() impossible.
                probs = np.zeros((len(m)))
                for j in range(len(clfs)):
                    # check if we need a subset of the features only
                    sel_headers = cfgs[j]["sel_headers"]  # some array of booleans
                    feats_j = feats[:, sel_headers[3:]]  # selected features for the model
                    pred = clfs[j].predict_proba(feats_j)  # predicted probability
                    probs[j] = pred[0, 1]
                res[i, ] = np.insert(feats, 0, probs)
                bar.update(i)
            except KeyboardInterrupt:
                break

    effective_FPS = (n_frames - offset - start_frame) / (time.time() - start)
    print("Effective Frame Rate: {}".format(effective_FPS))

    # write the results to a csv
    colnames = np.insert(headers[3:], 0, nms)
    print(colnames)
    df = pd.DataFrame(res, columns=colnames)
    df.to_csv(o)

    # release camera and microphone
    print("Releasing Camera and Microphone")
    cap.release()  # release camera
    # BUGFIX: these were bare attribute accesses (`stream.stop_stream`,
    # `stream.close`) — the audio stream was never actually stopped or
    # closed. Also terminate PyAudio and close the temporary wav file.
    stream.stop_stream()
    stream.close()
    p.terminate()
    wf.close()
    return df