def displayCards(frame, rider, deckSize, discard, played):
    """Render a rider's card area: rider info on top, then deck, discard and played piles side by side."""
    container = Frames(frame)
    displayRider(container.new(), rider)
    # One row of three sub-frames: deck, discard pile, played cards.
    deck_frame, discard_frame, played_frame = container.newLine(3)
    displayDeck(deck_frame, deckSize)
    displayDiscard(discard_frame, discard, rider.color)
    displayPlayed(played_frame, sorted(played))
def main():
    """Build the demo window and run the Tk event loop; pressing <space> closes it."""
    window = tk.Tk()
    window.title("Library")
    layout = Frames(window)
    reliefs(layout.new())
    showColors(layout.new())
    window.bind("<space>", lambda event: window.destroy())
    window.mainloop()
def __init__(self):
    """Set all variables to their default values."""
    self.total_inst = 0        # number of processed instructions
    self.stat_vars = 0         # statistics: variable counter
    self.instructions = []
    self.labels = []
    self.calls = []
    # Bug fix: the pattern must be a raw string. In a plain literal,
    # '\-', '\*' and '\$' are invalid escape sequences (DeprecationWarning
    # today, SyntaxError in future Python versions).
    self.names_pattern = re.compile(r"[^A-ZÁ-Ža-zá-ž0-9\-\*\$%_&]")
    self.frames = Frames()
    self.variables_factory = VariablesFactory(self.frames)
def __init__(self, datadir, addiliRecords=True, addWictionary=True):
    """Create a Germanet object and load its data from *datadir*.

    The data is loaded immediately on construction.

    :param datadir: [String] path to the directory where the Germanet data is stored
    :param addiliRecords: boolean, whether the iliRecords should also be loaded,
        default: True
    :param addWictionary: boolean, whether the wictionary files should also be loaded,
        default: True
    """
    self.datadir = datadir
    self.addiliRecords = addiliRecords
    self.addWictionary = addWictionary

    # lexunit id -> lexunit object
    self._lexunits = {}
    # synset id -> synset object
    self._synsets = {}
    # any orth form (all variants) -> set of lexunit ids
    self._orthform2lexid = defaultdict(set)
    # main orth form -> set of lexunit ids
    self._mainOrtform2lexid = defaultdict(set)
    # lower-cased orth form (all variants) -> set of lexunit ids
    self._lowercasedform2lexid = defaultdict(set)
    # word category -> set of lexunit ids
    self._wordcat2lexid = defaultdict(set)
    # word class -> set of lexunit ids
    self._wordclass2lexid = defaultdict(set)
    # synsets that are compounds
    self._compounds = set()
    # frame -> set of lexunit objects
    self._frames2lexunits = defaultdict(set)
    # wictionary entries
    self._wiktionary_entries = []
    # ili records
    self._ili_records = []

    # Populate all of the containers above, then wrap the frame mapping.
    self.load_data()
    self._frames = Frames(self._frames2lexunits)
def __init__(self, manufacturer):
    """Build a bike with a randomly chosen model name.

    Weight (kg) and cost are derived from one frame plus two wheels.
    """
    # Model names borrowed from Specialized bikes, as the author is a huge
    # fan of that brand (and owns one! :-)).
    model_names = (
        "Pitch", "Epic", "Source", "Enduro", "Fatboy", "Status", "Demo",
        "Tarmac", "Allez", "Venge", "Shiv", "Roubaix", "Secteur", "Diverge",
        "Awol", "Crux", "Langster", "Sirrus", "Daily", "Crosstail",
        "CossRoads", "Expedition",
    )
    self.frame = Frames(1)
    self.wheels = Wheels(1)
    self.manufacturer = manufacturer
    self.model_name = choice(model_names)
    # Component weights are in grams; convert the total to kilograms.
    self.weight = (self.frame.frame_weight + 2 * self.wheels.wheel_weight) / 1000.0
    self.cost = self.frame.frame_cost + 2 * self.wheels.wheel_cost
def __init__(self, program_list):
    """Initialize the interpreter state for the given program."""
    # Instruction list of the input program.
    self.program_list = program_list
    # Total number of executed instructions (may exceed the number of
    # instructions in the input program because of jumps).
    self.ins_cntr = 0
    # Index (from 0) of the instruction currently being processed.
    self.prog_cntr = 0
    # Variable frames (GF, and temporarily TF/LF as well):
    # dictionaries mapping variable name -> value.
    self.frames = Frames(self.prog_cntr)
    # label name -> instruction index
    self.label_dict = {}
    # Data stack for POPS and PUSHS.
    self.data_stack = []
    self.call_stack = []
    # Jump flag: when set, the interpreter re-walks the program from the
    # position jumped to.
    self.again = True
def start(self, video, input_path, out_path, frames_video_path, t, k, h):
    """Run the key-frame extraction pipeline on a video.

    :param video: path of the input video
    :param input_path: directory where the video's frames are generated
    :param out_path: directory that receives the key frames
    :param frames_video_path: directory that receives the frames used to
        build the summary video
    :param t: number of frames to skip
    :param k: number of clusters for k-means
    :param h: size of the generated histogram
    """
    helper = Frames()
    # Extract every frame of the video.
    helper.get_frames_from_video(video, input_path)
    # Select the key frames and the frames for the summary video.
    key_frames, summary_frames = self.__get_key_frames(input_path, t, k, h)
    # Move each group of frames into its destination directory.
    helper.move_key_frames(input_path, out_path, key_frames)
    helper.move_key_frames(input_path, frames_video_path, summary_frames)
    # Build the summary video from the selected frames.
    helper.get_video_from_frames(frames_video_path)
def __init__(self, db_conn, db_name):
    """Open a MongoDB connection and set up sub-collections and logging.

    NOTE(review): Python 2 code (print statement) built on the legacy
    pymongo ``Connection`` class, which was removed in pymongo 3
    (replaced by ``MongoClient``).
    """
    from pymongo import Connection
    print "Opening MongoDB connection"
    self.conn = Connection(host=db_conn)
    self.db = self.conn[db_name]

    # Open subcollections
    self.knowledge = Knowledge(self)
    self.frames = Frames(self)
    #self.map = Map(self)
    self.geo = Geo(self)

    # Logging
    from wifistalker import Log
    header = 'DB'
    self.log = Log(self, use_stdout=True, header=header)

    # Log collection; entries expire one hour after their 'stamp_utc' value.
    # NOTE(review): `self['log']` relies on this class defining __getitem__
    # (not visible in this excerpt) — confirm.
    self._log = self['log']
    self._log.ensure_index('stamp_utc', expireAfterSeconds=60*60)
# Select the input: a batch of still images or a test video.
type = 'video'

if type == 'image':
    names = glob.glob('Images/2012*.tif')
    # names = glob.glob('Images/Moon_Tile-031*ap85_8b.tif')
    # names = glob.glob('Images/Example-3*.jpg')
else:
    # file = 'short_video'
    file = 'Moon_Tile-024_043939'
    names = 'Videos/' + file + '.avi'
print(names)

start_over_all = time()

# Get configuration parameters.
configuration = Configuration()

# Read the frames.
try:
    frames = Frames(names, type=type)
    print("Number of images read: " + str(frames.number))
    print("Image shape: " + str(frames.shape))
except Exception as e:
    # Bug fix: a plain Python 3 Exception has no `.message` attribute, so the
    # old `e.message` raised an AttributeError while reporting the error.
    print("Error: " + str(e))
    exit()

# Rank the frames by their overall local contrast.
rank_frames = RankFrames(frames, configuration)
start = time()
rank_frames.frame_score()
end = time()
print('Elapsed time in ranking images: {}'.format(end - start))
print("Index of maximum: " + str(rank_frames.frame_ranks_max_index))

# Initialize the frame alignment object.
def __init__(self, window, decksCount):
    """Lay out the user area, the track area and one row of deck frames."""
    factory = Frames(window)
    self.user = factory.new()
    self.track = factory.new()
    # One sub-frame per deck, arranged on a single line.
    self.decks = factory.newLine(decksCount)
def workflow(input_name, input_type='video', roi=None, automatic_ap_creation=True):
    """
    Execute the whole stacking workflow for a test case. This can either use a video file
    (.avi .mov .mp4 .ser) or still images stored in a single directory.

    :param input_name: Video file (.avi .mov .mp4 .ser) or name of a directory containing
                       still images
    :param input_type: Either "video" or "image" (see "input_name")
    :param roi: If specified, tuple (y_low, y_high, x_low, x_high) with pixel bounds for
                "region of interest"
    :param automatic_ap_creation: If True, create the alignment point grid automatically;
                                  otherwise open the interactive alignment point editor.
    :return: average, [average_roi,] color_image_with_aps, stacked_image
             with:
             - average: global mean frame
             - average_roi: mean frame restricted to ROI (only if roi is specified)
             - color_image_with_aps: mean frame overlaid with alignment points and their
               boxes (white) and patches (green)
    """
    # Initialize the timer object used to measure execution times of program sections.
    my_timer = timer()

    # Images can either be extracted from a video file or a batch of single photographs.
    # For video file input, the Frames constructor expects the video file name for "names".
    if input_type == 'video':
        names = input_name
    # For single image input, the Frames constructor expects a list of image file names.
    else:
        names = [os.path.join(input_name, name) for name in os.listdir(input_name)]

    # The names of the result files are derived from the input video name or the input
    # directory name.
    stacked_image_name = input_name + '.stacked.tiff'
    ap_image_name = input_name + ".aps.tiff"

    print(
        "\n" +
        "*************************************************************************************\n"
        + "Start processing " + str(input_name) +
        "\n*************************************************************************************")

    my_timer.create('Execution over all')

    # Get configuration parameters.
    configuration = Configuration()
    configuration.initialize_configuration()

    # Read the frames.
    print("+++ Start reading frames")
    my_timer.create('Read all frames')
    try:
        frames = Frames(configuration, names, type=input_type)
        print("Number of images read: " + str(frames.number))
        print("Image shape: " + str(frames.shape))
    except Error as e:
        print("Error: " + str(e))
        exit()
    my_timer.stop('Read all frames')

    # Rank the frames by their overall local contrast.
    print("+++ Start ranking images")
    my_timer.create('Ranking images')
    rank_frames = RankFrames(frames, configuration)
    rank_frames.frame_score()
    my_timer.stop('Ranking images')
    print("Index of best frame: " + str(rank_frames.frame_ranks_max_index))

    # Initialize the frame alignment object.
    align_frames = AlignFrames(frames, rank_frames, configuration)

    if configuration.align_frames_mode == "Surface":
        my_timer.create('Select optimal alignment patch')
        # Select the local rectangular patch in the image where the L gradient is highest
        # in both x and y direction. The scale factor specifies how much smaller the patch
        # is compared to the whole image frame.
        (y_low_opt, y_high_opt, x_low_opt, x_high_opt) = align_frames.compute_alignment_rect(
            configuration.align_frames_rectangle_scale_factor)
        my_timer.stop('Select optimal alignment patch')

        print("optimal alignment rectangle, y_low: " + str(y_low_opt) + ", y_high: " +
              str(y_high_opt) + ", x_low: " + str(x_low_opt) + ", x_high: " +
              str(x_high_opt))

    # Align all frames globally relative to the frame with the highest score.
    print("+++ Start aligning all frames")
    my_timer.create('Global frame alignment')
    try:
        align_frames.align_frames()
    # NOTE(review): the `.message` attribute below relies on the project's
    # exception classes defining it; a plain Python 3 exception does not.
    except NotSupportedError as e:
        print("Error: " + e.message)
        exit()
    except InternalError as e:
        print("Warning: " + e.message)
    my_timer.stop('Global frame alignment')

    print("Intersection, y_low: " + str(align_frames.intersection_shape[0][0]) +
          ", y_high: " + str(align_frames.intersection_shape[0][1]) +
          ", x_low: " + str(align_frames.intersection_shape[1][0]) +
          ", x_high: " + str(align_frames.intersection_shape[1][1]))

    # Compute the average frame.
    print("+++ Start computing reference frame")
    my_timer.create('Compute reference frame')
    average = align_frames.average_frame()
    my_timer.stop('Compute reference frame')
    print("Reference frame computed from the best " +
          str(align_frames.average_frame_number) + " frames.")

    # If the ROI is to be set to a smaller size than the whole intersection, do so.
    if roi:
        print("+++ Start setting ROI and computing new reference frame")
        my_timer.create('Setting ROI and new reference')
        average_roi = align_frames.set_roi(roi[0], roi[1], roi[2], roi[3])
        my_timer.stop('Setting ROI and new reference')

    # Initialize the AlignmentPoints object.
    my_timer.create('Initialize alignment point object')
    alignment_points = AlignmentPoints(configuration, frames, rank_frames, align_frames)
    my_timer.stop('Initialize alignment point object')

    if automatic_ap_creation:
        # Create alignment points, and create an image with all alignment point boxes and
        # patches.
        print("+++ Start creating alignment points")
        my_timer.create('Create alignment points')
        # If a ROI is selected, alignment points are created in the ROI window only.
        alignment_points.create_ap_grid()
        my_timer.stop('Create alignment points')
        print("Number of alignment points selected: " +
              str(len(alignment_points.alignment_points)) +
              ", aps dropped because too dim: " +
              str(alignment_points.alignment_points_dropped_dim) +
              ", aps dropped because too little structure: " +
              str(alignment_points.alignment_points_dropped_structure))
    else:
        # Open the alignment point editor.
        app = QtWidgets.QApplication(sys.argv)
        alignment_point_editor = AlignmentPointEditorWidget(
            None, configuration, align_frames, alignment_points, None)
        alignment_point_editor.setMinimumSize(800, 600)
        alignment_point_editor.showMaximized()
        app.exec_()

        print("After AP editing, number of APs: " +
              str(len(alignment_points.alignment_points)))

        # Allocate reference boxes for APs that do not have one yet.
        count_updates = 0
        for ap in alignment_points.alignment_points:
            if ap['reference_box'] is not None:
                continue
            count_updates += 1
            AlignmentPoints.set_reference_box(ap, alignment_points.mean_frame)
        print("Buffers allocated for " + str(count_updates) + " alignment points.")

    # Produce an overview image showing all alignment points.
    if roi:
        color_image_with_aps = alignment_points.show_alignment_points(average_roi)
    else:
        color_image_with_aps = alignment_points.show_alignment_points(average)

    # For each alignment point rank frames by their quality.
    my_timer.create('Rank frames at alignment points')
    print("+++ Start ranking frames at alignment points")
    alignment_points.compute_frame_qualities()
    my_timer.stop('Rank frames at alignment points')

    # Allocate StackFrames object.
    stack_frames = StackFrames(configuration, frames, rank_frames, align_frames,
                               alignment_points, my_timer)

    # Stack all frames.
    print("+++ Start stacking frames")
    stack_frames.stack_frames()

    # Merge the stacked alignment point buffers into a single image.
    print("+++ Start merging alignment patches")
    stacked_image = stack_frames.merge_alignment_point_buffers()

    # If the drizzle factor is 1.5, reduce the pixel resolution of the stacked image
    # buffer to half the size used in stacking.
    if configuration.drizzle_factor_is_1_5:
        print("+++ Start reducing image buffer size")
        stack_frames.half_stacked_image_buffer_resolution()

    # Save the stacked image as 16bit int (color or mono).
    my_timer.create('Saving the final image')
    Frames.save_image(stacked_image_name, stacked_image, color=frames.color,
                      header=configuration.global_parameters_version)
    my_timer.stop('Saving the final image')

    # Print out timer results.
    my_timer.stop('Execution over all')
    my_timer.print()

    # Write the image with alignment points.
    Frames.save_image(ap_image_name, color_image_with_aps, color=True,
                      header=configuration.global_parameters_version)

    # If a ROI is selected, return both the original and the reduced-size average frame.
    if roi:
        return average, average_roi, color_image_with_aps, stacked_image
    else:
        return average, color_image_with_aps, stacked_image
# Raw sensor sample buffers, filled elsewhere (presumably by a serial reader
# thread — confirm against the rest of the file).
acc = []
gyr = []
mag = []
touch = []
# Snapshot buffers: copies of the raw buffers taken under the lock.
c_timestamp = []
c_acc = []
c_gyr = []
c_mag = []
c_touch = []
saveQ = []
saveT = []
saveS = []
frames = Frames()
mad = madgwickahrs.MadgwickAHRS()

def draw(timestamp, gyr, acc, mag, touch):
    """Continuously snapshot the shared sample buffers and feed them to `frames`.

    Runs forever; intended to be executed on its own thread. `threadLock`
    and `frames` come from module scope.
    """
    while True:
        if len(timestamp) > 1:
            # Copy the shared buffers under the lock so the producer thread
            # cannot mutate them mid-read.
            if threadLock.acquire(100):
                c_timestamp = copy.deepcopy(timestamp)
                c_gyr = copy.deepcopy(gyr)
                c_acc = copy.deepcopy(acc)
                # c_mag = copy.deepcopy(mag)
                # NOTE(review): with the line above commented out, the call
                # below passes the module-level `c_mag` (an empty list), not
                # a snapshot of `mag` — confirm this is intended.
                c_touch = copy.deepcopy(touch)
                threadLock.release()
                frames.read_serial(c_timestamp, c_gyr, c_acc, c_mag, c_touch)
                frames.preprocess()
# names = glob.glob('Images/Example-3*.jpg') else: # file = 'short_video' file = 'Moon_Tile-024_043939' names = 'Videos/' + file + '.avi' print(names) my_timer.create('Execution over all') # Get configuration parameters. configuration = Configuration() # Read the frames. my_timer.create('Read all frames') try: frames = Frames(names, type=type, convert_to_grayscale=True) print("Number of images read: " + str(frames.number)) print("Image shape: " + str(frames.shape)) except Exception as e: print("Error: " + e.message) exit() my_timer.stop('Read all frames') # Rank the frames by their overall local contrast. my_timer.create('Ranking images') rank_frames = RankFrames(frames, configuration) rank_frames.frame_score() my_timer.stop('Ranking images') print("Index of best frame: " + str(rank_frames.frame_ranks_max_index)) # Initialize the frame alignment object.
def __init__(self, window):
    """Attach a Frames factory to *window*; pressing <space> closes the window."""
    self.frames = Frames(window)
    window.bind("<space>", lambda event: window.destroy())
def execute_frames(self, input_name, input_type, convert_to_grayscale):
    """
    Set up and read the input for a new job: release leftovers of the previous run,
    derive result/log file names, redirect protocol output, then either read all frames
    (stacking jobs) or a single image (postprocessing jobs) and emit the next-task signal.

    :param input_name: video file, image directory, or single image file (see input_type)
    :param input_type: 'video', 'image' or 'postproc'
    :param convert_to_grayscale: passed through to the Frames constructor
    """
    # If objects are left over from previous run, delete them.
    # NOTE(review): `del obj` only unbinds the loop variable; the objects remain
    # referenced by the corresponding `self.` attributes — confirm intent.
    for obj in [self.frames, self.rank_frames, self.align_frames,
                self.alignment_points, self.stack_frames]:
        if obj is not None:
            del obj

    # Force the garbage collector to release unreferenced objects.
    gc.collect()

    # Update the status bar in the main GUI.
    self.input_name = input_name
    self.set_status_bar_processing_phase("reading frames")

    # A job can either "stack" images or "postprocess" a single image. In the latter
    # case, input is a single image file.
    #
    # Images for stacking can either be extracted from a video file or a batch of single
    # photographs. In the first case, input_type is set to 'video', in the second case to
    # 'image'.
    if input_type == 'postproc':
        self.job_type = 'postproc'
        self.postproc_input_name = input_name
        # Reset the postprocessed image to None. This way, in saving the postprocessing
        # result, it can be checked if an image was computed in the workflow thread.
        self.postprocessed_image = None
        self.postprocessed_image_name = PostprocDataObject.set_file_name_processed(
            input_name, self.configuration.postproc_suffix,
            self.configuration.global_parameters_image_format)
        self.attached_log_name = splitext(input_name)[0] + '_postproc-log.txt'

    # For video file input, the Frames constructor expects the video file name for
    # "names".
    elif input_type == 'video':
        self.job_type = 'stacking'
        names = input_name
        self.stacked_image_name = splitext(input_name)[0] + \
            self.configuration.stack_frames_suffix + '.' + \
            self.configuration.global_parameters_image_format
        self.attached_log_name = splitext(input_name)[0] + '_stacking-log.txt'

    # For single image input, the Frames constructor expects a list of image file names
    # for "names".
    else:  # input_type = 'image'
        self.job_type = 'stacking'
        names = [join(input_name, name) for name in listdir(input_name)]
        self.stacked_image_name = input_name + self.configuration.stack_frames_suffix + '.' + \
            self.configuration.global_parameters_image_format
        self.attached_log_name = input_name + '_stacking-log.txt'

    # Redirect stdout to a file if requested.
    if self.configuration.global_parameters_write_protocol_to_file != self.output_redirected:
        # Output currently redirected. Reset to stdout.
        if self.output_redirected:
            sys.stdout = self.stdout_saved
            self.output_redirected = False
        # Currently set to stdout, redirect to file now.
        else:
            try:
                self.stdout_saved = sys.stdout
                sys.stdout = open(self.configuration.protocol_filename, 'a+')
                self.output_redirected = True
            except IOError:
                pass

    # Create logfile if requested to store the log with the stacked file.
    if self.attached_log_file:
        self.attached_log_file.close()
    if self.configuration.global_parameters_store_protocol_with_result:
        self.attached_log_file = open(self.attached_log_name, "w+")
    else:
        self.attached_log_file = None

    # Write a header to stdout and optionally to the logfile.
    if self.configuration.global_parameters_protocol_level > 0:
        decorator_line = (len(input_name) + 28) * "*"
        Miscellaneous.protocol(decorator_line, self.attached_log_file,
                               precede_with_timestamp=False)
        Miscellaneous.protocol("Start processing " + input_name, self.attached_log_file)
        Miscellaneous.protocol(decorator_line, self.attached_log_file,
                               precede_with_timestamp=False)

    # Initialize the timer object used to measure execution times of program sections.
    self.my_timer = timer()
    self.my_timer.create('Execution over all')

    if self.job_type == 'stacking':
        # Write parameters to the protocol.
        if self.configuration.global_parameters_protocol_level > 1:
            Miscellaneous.print_stacking_parameters(self.configuration,
                                                    self.attached_log_file)

        # Decide on the objects to be buffered, depending on configuration parameter.
        buffer_original, buffer_monochrome, buffer_gaussian, buffer_laplacian = \
            Frames.set_buffering(self.configuration.global_parameters_buffering_level)
        if self.configuration.global_parameters_protocol_level > 1:
            Miscellaneous.protocol(
                "+++ Buffering level is " +
                str(self.configuration.global_parameters_buffering_level) + " +++",
                self.attached_log_file)

        if buffer_original:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("+++ Start reading frames +++",
                                       self.attached_log_file)
        # Read all frames for this job.
        try:
            self.frames = Frames(
                self.configuration, names, type=input_type,
                calibration=self.calibration,
                convert_to_grayscale=convert_to_grayscale,
                progress_signal=self.work_current_progress_signal,
                buffer_original=buffer_original,
                buffer_monochrome=buffer_monochrome,
                buffer_gaussian=buffer_gaussian,
                buffer_laplacian=buffer_laplacian)
            if self.configuration.global_parameters_protocol_level > 1:
                Miscellaneous.protocol(" Number of images: " + str(self.frames.number) +
                                       ", image shape: " + str(self.frames.shape),
                                       self.attached_log_file,
                                       precede_with_timestamp=False)

            # Report which dark / flat calibration data were matched.
            if self.frames.calibration_matches:
                if self.calibration.master_dark_frame_adapted is not None and \
                        self.calibration.inverse_master_flat_frame is not None:
                    Miscellaneous.protocol(" Dark / flat frame calibration is active",
                                           self.attached_log_file,
                                           precede_with_timestamp=False)
                elif self.calibration.master_dark_frame_adapted is not None:
                    Miscellaneous.protocol(" Dark frame calibration is active",
                                           self.attached_log_file,
                                           precede_with_timestamp=False)
                elif self.calibration.inverse_master_flat_frame is not None:
                    Miscellaneous.protocol(" Flat frame calibration is active",
                                           self.attached_log_file,
                                           precede_with_timestamp=False)
            else:
                Miscellaneous.protocol(" No matching master dark / flat frames found, "
                                       "calibration de-activated",
                                       self.attached_log_file,
                                       precede_with_timestamp=False)
        # NOTE(review): `e.message` relies on the project's Error class defining
        # it; a plain Python 3 exception does not (see the generic handler below
        # which uses str(e)).
        except Error as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + e.message +
                                       ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return
        except Exception as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + str(e) +
                                       ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return

        # Look up the available RAM (without paging)
        virtual_memory = dict(psutil.virtual_memory()._asdict())
        available_ram = virtual_memory['available'] / 1e9

        # Compute the approximate RAM usage of this job at the selected buffering level.
        needed_ram = self.frames.compute_required_buffer_size(
            self.configuration.global_parameters_buffering_level)
        if self.configuration.global_parameters_protocol_level > 1:
            Miscellaneous.protocol(" RAM required (Gbytes): " + str(needed_ram) +
                                   ", available: " + str(available_ram),
                                   self.attached_log_file,
                                   precede_with_timestamp=False)

        # If the required RAM is not available, test if lowering the buffering level
        # would help.
        if needed_ram > available_ram:
            recommended_level = None
            for level in range(self.configuration.global_parameters_buffering_level - 1,
                               -1, -1):
                alternative_ram = self.frames.compute_required_buffer_size(level)
                if alternative_ram < available_ram:
                    recommended_level = level
                    break

            # If an appropriate level was found, write it as a recommendation to the
            # protocol.
            if self.configuration.global_parameters_protocol_level > 0:
                if recommended_level is not None:
                    Miscellaneous.protocol(
                        "Error: Too little RAM for chosen buffering level,"
                        " recommended level: " + str(recommended_level) +
                        ", continuing with next job\n",
                        self.attached_log_file)
                else:
                    Miscellaneous.protocol(
                        "Error: Too little RAM for this job, "
                        "continuing with the next one\n",
                        self.attached_log_file)

            # Continue with the next job.
            self.work_next_task_signal.emit("Next job")
            return

        # The RAM seems to be sufficient, continue with ranking frames.
        self.work_next_task_signal.emit("Rank frames")

    # Job type is 'postproc'.
    else:
        # Read the single input image for postprocessing.
        try:
            self.postproc_input_image = Frames.read_image(self.postproc_input_name)
        except Error as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + e.message +
                                       ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return
        except Exception as e:
            if self.configuration.global_parameters_protocol_level > 0:
                Miscellaneous.protocol("Error: " + str(e) +
                                       ", continue with next job\n",
                                       self.attached_log_file)
            self.work_next_task_signal.emit("Next job")
            return

        # Convert 8 bit to 16 bit.
        if self.postproc_input_image.dtype == uint8:
            self.postproc_input_image = self.postproc_input_image.astype(uint16) * 256

        self.work_next_task_signal.emit("Postprocessing")
from os import getcwd if __name__ == '__main__': cwd = getcwd() root = Tk() root.title('Offline Pass') root.geometry("+600+200") root.iconbitmap(cwd + '/imgs/favicon.ico') root.resizable(False, False) try: db.c.execute("SELECT password FROM master") result = db.c.fetchone()[0] if result == None: Frames(root, 'setup') else: Frames(root, 'login') except sqlite3.OperationalError: from cryptography.fernet import Fernet key = Fernet.generate_key() db.c.execute("""CREATE TABLE IF NOT EXISTS entries ( website BLOB, username BLOB, password BLOB )""") db.c.execute("""CREATE TABLE IF NOT EXISTS master ( password BLOB, salt BLOB, question BLOB,
names = glob('Images/2012*.tif') # names = glob.glob('Images/Moon_Tile-031*ap85_8b.tif') # names = glob.glob('Images/Example-3*.jpg') else: names = 'Videos/short_video.avi' print(names) my_timer.create('Execution over all') # Get configuration parameters. configuration = Configuration() my_timer.create('Read all frames') try: frames = Frames(configuration, names, type=type, convert_to_grayscale=True) print("Number of images read: " + str(frames.number)) print("Image shape: " + str(frames.shape)) except Error as e: print("Error: " + e.message) exit() my_timer.stop('Read all frames') # Rank the frames by their overall local contrast. my_timer.create('Ranking images') rank_frames = RankFrames(frames, configuration) rank_frames.frame_score() my_timer.stop('Ranking images') print("Index of maximum: " + str(rank_frames.frame_ranks_max_index))
if type == 'image': names = glob('Images/2012*.tif') # names = glob.glob('Images/Moon_Tile-031*ap85_8b.tif') # names = glob.glob('Images/Example-3*.jpg') else: names = 'Videos/short_video.avi' print(names) my_timer.create('Execution over all') # Get configuration parameters. configuration = Configuration() my_timer.create('Read all frames') try: frames = Frames(configuration, names, type=type) print("Number of images read: " + str(frames.number)) print("Image shape: " + str(frames.shape)) except Error as e: print("Error: " + e.message) exit() my_timer.stop('Read all frames') # Rank the frames by their overall local contrast. my_timer.create('Ranking images') rank_frames = RankFrames(frames, configuration) rank_frames.frame_score() my_timer.stop('Ranking images') print("Index of maximum: " + str(rank_frames.frame_ranks_max_index)) print("Frame scores: " + str(rank_frames.frame_ranks))
# names = glob.glob('Images/Moon_Tile-031*ap85_8b.tif') # names = glob.glob('Images/Example-3*.jpg') else: names = 'Videos/another_short_video.avi' print(names) my_timer.create('Execution over all') # Get configuration parameters. configuration = Configuration() configuration.initialize_configuration() my_timer.create('Read all frames') try: frames = Frames(configuration, names, type=type, bayer_option_selected="Grayscale") print("Number of images read: " + str(frames.number)) print("Image shape: " + str(frames.shape)) except Error as e: print("Error: " + e.message) exit() my_timer.stop('Read all frames') # Rank the frames by their overall local contrast. my_timer.create('Ranking images') rank_frames = RankFrames(frames, configuration) rank_frames.frame_score() my_timer.stop('Ranking images') print("Index of maximum: " + str(rank_frames.frame_ranks_max_index))