def Read_Input_Images(inputDir, listOfIgnoredSamples, dB, resizedFlag, table, workplace, spatial_size, channel, objective_flag):
    """Load every video of every subject under ``inputDir`` into memory.

    Directory layout is ``inputDir/<subject>/<video>/<frames...>``.  Each
    video becomes a 2-D array of flattened frames stacked row-wise (a
    single-frame video stays 1-D, matching historical behavior); videos are
    grouped per subject.

    Parameters
    ----------
    inputDir : str
        Root directory, must end with a path separator (paths are built by
        plain string concatenation).
    listOfIgnoredSamples : list[str]
        Full video paths to skip entirely.
    dB, table, workplace, objective_flag :
        Forwarded verbatim to ``collectinglabel`` for label collection.
    resizedFlag : int
        1 -> resize every frame to ``spatial_size`` x ``spatial_size``.
    spatial_size : int
        Target square edge length when resizing.
    channel : int
        1 -> convert frames to grayscale before flattening.

    Returns
    -------
    list[list[numpy.ndarray]]
        ``SubperdB[subject_idx][video_idx]`` -> stacked flattened frames.
    """
    r = w = spatial_size
    SubperdB = []
    subperdb_id = []  # cross-checking parameter (collected but not returned)

    for sub in sorted(os.listdir(inputDir)):
        VidperSub = []
        vid_id = np.empty([0])
        for vid in sorted(os.listdir(inputDir + sub)):
            path = inputDir + sub + '/' + vid + '/'  # image loading path
            if path in listOfIgnoredSamples:
                continue
            imgList = readinput(path)
            numFrame = len(imgList)
            if resizedFlag == 1:
                col = w
                row = r
            else:
                # keep the native resolution of the first frame
                img = cv2.imread(imgList[0])
                [row, col, _l] = img.shape

            # read the label for each input video; sub[3:] strips the
            # subject-folder prefix (e.g. "sub01" -> "01")
            collectinglabel(table, sub[3:], vid, workplace + 'Classification/', dB, objective_flag)

            frames = []
            for var in range(numFrame):
                img = cv2.imread(imgList[var])
                if channel == 1:
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if resizedFlag == 1:
                    # cv2.resize expects (width, height) == (col, row)
                    img = cv2.resize(img, (col, row))
                frames.append(img.flatten())
                vid_id = np.append(vid_id, imgList[var])  # <--cross-check

            if not frames:
                # empty video folder: the original code would have appended a
                # stale FrameperVid from the previous video — skip instead
                continue
            # single stack instead of quadratic vstack-in-loop; a one-frame
            # video stays 1-D exactly as the original first-iteration branch did
            FrameperVid = frames[0] if numFrame == 1 else np.vstack(frames)
            VidperSub.append(FrameperVid)

        subperdb_id.append(vid_id)  # <--cross-check
        SubperdB.append(VidperSub)
    return SubperdB
def Read_SMIC_Images(inputDir, listOfIgnoredSamples, dB, resizedFlag, table, workplace, spatial_size):
    """Load SMIC micro-expression videos grouped per subject.

    Directory layout is ``inputDir/s<N>/micro/<type>/<video>/<frames...>``.
    Subjects are iterated in numeric order of ``<N>``.  Each video becomes a
    2-D array of flattened grayscale frames (a single-frame video stays 1-D).

    Parameters
    ----------
    inputDir : str
        SMIC root directory, must end with a path separator.
    listOfIgnoredSamples : list[str]
        Unused here; kept for signature compatibility with the other loaders.
    dB, table, workplace :
        Forwarded verbatim to ``collectinglabel``.
    resizedFlag : int
        1 -> resize every frame to ``spatial_size`` x ``spatial_size``.
    spatial_size : int
        Target square edge length when resizing.

    Returns
    -------
    list[list[numpy.ndarray]]
        ``SubperdB[subject_idx][video_idx]`` -> stacked flattened frames.
    """
    r = w = spatial_size
    SubperdB = []
    # folder names are like "s3"; sort numerically, not lexically
    for sub in sorted(int(infile[1:]) for infile in os.listdir(inputDir)):
        sub_str = "s" + str(sub)
        micro_dir = inputDir + sub_str + "/micro/"  # hoisted common prefix
        VidperSub = []
        for type_me in sorted(os.listdir(micro_dir)):
            type_dir = micro_dir + type_me + "/"
            for vids in sorted(os.listdir(type_dir)):
                vid_dir = type_dir + vids + "/"
                frames = []
                for item in sorted(os.listdir(vid_dir)):
                    img = cv2.imread(vid_dir + item)
                    [_, _, dim] = img.shape
                    if dim == 3:
                        img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                    if resizedFlag == 1:
                        # (r, w) are both spatial_size, so the (width, height)
                        # argument order of cv2.resize is moot here
                        img = cv2.resize(img, (r, w))
                    frames.append(img.flatten())
                # single stack instead of quadratic vstack-in-loop;
                # one-frame videos stay 1-D as before
                FrameperVid = frames[0] if len(frames) == 1 else np.vstack(frames)
                VidperSub.append(FrameperVid)
                # the label in xcel is not the same as in the path:
                # single-digit subjects get a zero-padded video id
                if sub < 10:
                    vids = vids[:1] + "0" + vids[1:]
                collectinglabel(table, sub, vids, workplace + "Classification/", dB)
        SubperdB.append(VidperSub)
    return SubperdB
def Read_Input_Images_Apex(inputDir, listOfIgnoredSamples, dB, resizedFlag, table, workplace, spatial_size, channel, objective_flag):
    """Load only the onset (first) and apex frames of each video.

    Each video contributes a 2-row array: row 0 is the flattened onset frame,
    row 1 the flattened apex frame.  The apex frame index is looked up in
    ``table`` via pandas boolean indexing.
    """
    r = w = spatial_size
    SubperdB = []
    # cross-checking parameter (collected nowhere below — kept for parity
    # with the sibling loaders)
    subperdb_id = []
    for sub in sorted([infile for infile in os.listdir(inputDir)]):
        VidperSub = []
        for vid in sorted([inrfile for inrfile in os.listdir(inputDir + sub)]):
            path = inputDir + sub + '/' + vid + '/'  # image loading path
            if path in listOfIgnoredSamples:
                print(path)
                continue
            imgList = readinput(path)
            # vid.split(".")[0] drops any extension from the video folder name
            collectinglabel(table, sub[3:], vid.split(".")[0], workplace + 'Classification/', dB, objective_flag)
            # NOTE(review): assumes `table` rows are [subject_id, video_id, ?,
            # apex_frame_index, ...] with column 0 matching the last two chars
            # of the subject folder name — TODO confirm against the table
            # producer; int() on the filtered Series also assumes exactly one
            # matching row.
            table_df = pd.DataFrame(table)
            apexInd = table_df[table_df[0] == sub[-2:]]
            apexInd = int(apexInd[apexInd[1] == vid][3])
            imgON = cv2.imread(imgList[0])       # onset = first frame
            imgApex = cv2.imread(imgList[apexInd])
            if channel == 1:
                imgON = cv2.cvtColor(imgON, cv2.COLOR_BGR2GRAY)
                imgApex = cv2.cvtColor(imgApex, cv2.COLOR_BGR2GRAY)
            if resizedFlag == 1:
                imgON = cv2.resize(imgON, (spatial_size, spatial_size))
                imgApex = cv2.resize(imgApex, (spatial_size, spatial_size))
            # stack onset on top of apex: shape (2, H*W[*C])
            FrameperVid = imgON.flatten()
            FrameperVid = np.vstack((FrameperVid, imgApex.flatten()))
            VidperSub.append(FrameperVid)
        SubperdB.append(VidperSub)
    return SubperdB
# NOTE(review): orphaned code fragment — this is a stray interior chunk of a
# Read_Input_Images-style loop body, not a complete definition: it begins with
# a bare `continue` and ends mid-`if`.  It differs from the complete loaders
# above in that readinput() is called with an extra `dB` argument and
# collectinglabel() is called without `objective_flag` — presumably an older
# revision left behind.  Candidate for deletion; left token-identical pending
# confirmation that nothing concatenates this file in a way that uses it.
            continue
        imgList = readinput(path, dB)
        numFrame = len(imgList)
        if resizedFlag == 1:
            col = w
            row = r
        else:
            img = cv2.imread(imgList[0])
            [row, col, _l] = img.shape
        ## read the label for each input video
        collectinglabel(table, sub[3:], vid, workplace + 'Classification/', dB)
        for var in range(numFrame):
            img = cv2.imread(imgList[var])
            [_, _, dim] = img.shape
            if dim == 3:
                # pass
                img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            if resizedFlag == 1:
                # in resize function, [col,row]
                img = cv2.resize(img, (col, row))
            if var == 0:
                FrameperVid = img.flatten()
def Read_Input_Images_SAMM_CASME(inputDir, filteredSamples, ignoredSamples, dB, resizedFlag, table, workplace, spatial_size, channel, objective_flag):
    """Load SAMM/CASME videos, keeping only whitelisted samples.

    Directory layout is ``inputDir/<subject>/<video>/<frames...>``.  Unlike
    ``Read_Input_Images``, which skips a blacklist, this loader keeps only
    the paths present in ``filteredSamples`` (a whitelist), and passes the
    full subject folder name to ``collectinglabel``.

    Parameters
    ----------
    inputDir : str
        Root directory, must end with a path separator.
    filteredSamples : collection[str]
        Full video paths to KEEP; everything else is skipped.
    ignoredSamples :
        Unused; kept for signature compatibility.
    dB, table, workplace, objective_flag :
        Forwarded verbatim to ``collectinglabel``.
    resizedFlag : int
        1 -> resize every frame to ``spatial_size`` x ``spatial_size``.
    spatial_size : int
        Target square edge length when resizing.
    channel : int
        1 -> convert frames to grayscale before flattening.

    Returns
    -------
    list[list[numpy.ndarray]]
        ``SubperdB[subject_idx][video_idx]`` -> stacked flattened frames
        (1-D for single-frame videos).
    """
    r = w = spatial_size
    SubperdB = []
    subperdb_id = []  # cross-checking parameter (collected but not returned)

    for sub in sorted(os.listdir(inputDir)):
        VidperSub = []
        vid_id = np.empty([0])
        for vid in sorted(os.listdir(inputDir + sub)):
            path = inputDir + sub + '/' + vid + '/'
            # filtered samples are samples needed (whitelist semantics)
            if path not in filteredSamples:
                continue
            imgList = readinput(path)
            numFrame = len(imgList)
            if resizedFlag == 1:
                col = w
                row = r
            else:
                # keep the native resolution of the first frame
                img = cv2.imread(imgList[0])
                [row, col, _l] = img.shape

            # read the label for each input video; note: full `sub` here,
            # not sub[3:] as in Read_Input_Images
            collectinglabel(table, sub, vid, workplace + 'Classification/', dB, objective_flag)

            frames = []
            for var in range(numFrame):
                img = cv2.imread(imgList[var])
                if channel == 1:
                    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                if resizedFlag == 1:
                    # cv2.resize expects (width, height) == (col, row)
                    img = cv2.resize(img, (col, row))
                frames.append(img.flatten())
                vid_id = np.append(vid_id, imgList[var])  # <--cross-check

            if not frames:
                # empty video folder: the original code would have appended a
                # stale FrameperVid from the previous video — skip instead
                continue
            # single stack instead of quadratic vstack-in-loop; one-frame
            # videos stay 1-D as in the original first-iteration branch
            FrameperVid = frames[0] if numFrame == 1 else np.vstack(frames)
            VidperSub.append(FrameperVid)

        subperdb_id.append(vid_id)  # <--cross-check
        SubperdB.append(VidperSub)
    return SubperdB