def _check_sessions_compatibilty(self): hashed_settings = [] for session in self.session_data: session_settings = self.db_sessions.find({'hash': session})[0]['ctx'] hashed = None if self.session_mode == 'strict': hashed = hashlib.md5( pickle.dumps(session_settings)).hexdigest() elif self.session_mode == 'normal': filtered_session_settings = {} for key in session_settings.keys(): if key.find('params') != -1: filtered_session_settings[key] = session_settings[key] hashed = hashlib.md5( pickle.dumps(filtered_session_settings)).hexdigest() hashed_settings.append(hashed) if len(set(hashed_settings)) != 1: utils.msg_error( 'recipe', 'Can\'t create graphs from mutually not compatabile sessions') return False return True
def __init__(self):
    """Entry point: parse CLI arguments, initialise the environment and the
    database connection, then dispatch to one of the execution modes.

    Any failure along the way reports an error and returns early, leaving
    the object only partially initialised.
    """
    # Commandline argument parsing; parse_args() returns None on bad input.
    self.args = Args().parse_args()
    if self.args is None:  # FIX: idiomatic identity check instead of '== None'
        return
    # Program init
    utils.msg_welcome()
    if not utils.check_libraries():
        utils.msg_error('check', 'CAN\'T LAUNCH')
        return
    # Database handling, crucial for program execution
    config = configparser.ConfigParser()
    config.read('config/ibt19_db.ini')
    self.db = Database(config['DB'])
    if not self.db.check_connection():
        utils.msg_error('database', 'can\'t connect to database')
        return
    # One of three modes the program can run in:
    #   1: Checking directory
    #   2: Test execution according to the recipe
    #   3: Graph drawing
    getattr(self, 'execute_{}'.format(self.args['type']))()
def _fetch_session_input(self, session_hash, graph_session_blueprint): session_query = {'hash': session_hash} """ Track all files used in session for statistical purposes """ for input_file in self.db_data.find(session_query).distinct( 'ctx.file.name'): if input_file not in self.all_files: self.all_files.append(input_file) """ Fetch session data """ try: data_session_settings = self.db_sessions.find_one( session_query)['ctx'] except TypeError: utils.msg_error('session', 'Session {} does not exist'.format(session_name)) return False variants = data_session_settings['file_types'] """ Get main axis ordering """ streams, values = self._fetch_session_axis(session_hash, graph_session_blueprint, data_session_settings) for stream in streams: #print(graph_session_blueprint['data_settings']['values']) self._fetch_session_stream( session_hash, stream, values, graph_session_blueprint['data_settings']['values'])
def parse_args(self):
    """Translate the argparse namespace into a mode dictionary.

    Exactly one mode option may be supplied.  Returns the mode dict
    ({'type', 'path', 'log'}) on success, or None when zero or more
    than one option was given.
    """
    mode = {'type': None, 'path': None, 'log': not self.args.nolog}
    for arg in vars(self.args):
        path = getattr(self.args, arg)
        # The no-log switch is a modifier, not a mode option.
        if arg != defs.get('NOLOG'):
            if path:
                if mode['type']:
                    utils.msg_error(
                        'args',
                        'Can\'t combine arguments, see help for more information'
                    )
                    return None
                mode['type'] = arg
                # Full path required with subsequent fetching
                mode['path'] = os.path.abspath(path)
    if mode['type'] is None:  # FIX: identity check instead of '== None'
        utils.msg_error(
            'args',
            'Must use at least one option, see help for more information')
    else:
        return mode
def prepare_lib(self):
    """Load the flag match table from the JSON config file at
    self.config_path.

    Returns True on success; on a malformed file reports the error and
    returns False.  (json.JSONDecodeError subclasses ValueError.)
    """
    with open(self.config_path) as json_file:
        try:
            self.flags_match_table = json.load(json_file)
            return True
        except ValueError as e:
            # FIX: original message read 'corrupted of not valid'.
            utils.msg_error(
                'internal',
                'config file corrupted or not valid \n{}'.format(
                    os.path.abspath(self.config_path)))
            utils.msg_error('internal', e)
            return False
def _add_to_fetched(self, stream, **kwargs): allowed_options = [ 'init_stream', 'init_saving', 'saving_value', 'value', 'time_compress', 'time_decompress' ] selected_options = [] for key, value in kwargs.items(): if key not in allowed_options: utils.msg_error('fetching', 'unknown option adding to fetched values') return else: selected_options.append(key) """ When we want to add empty dictionary it's logical we're just doing ground work so if it's already in place there's no need to add it as is """ if len(selected_options) == 1: sel = selected_options[0] if sel == allowed_options[0]: if stream not in self.fetched_data[self.curr_name].keys(): self.fetched_data[self.curr_name][stream] = {} elif sel == allowed_options[1]: if value not in self.fetched_data[ self.curr_name][stream].keys(): self.fetched_data[self.curr_name][stream][value] = { 'value': [], 'time_compress': [], 'time_decompress': [] } else: """ Adding values """ if 'value' in kwargs: self.fetched_data[self.curr_name][stream][ kwargs['saving_value']]['value'].append(kwargs['value']) if 'time_compress' in kwargs: self.fetched_data[self.curr_name][stream][ kwargs['saving_value']]['time_compress'].append( kwargs['time_compress']) if 'time_decompress' in kwargs: self.fetched_data[self.curr_name][stream][ kwargs['saving_value']]['time_decompress'].append( kwargs['time_decompress'])
def _check_dir_info(self):
    """Check that the database holds fresh information about self.dir.

    Accepts either an exact match of the recorded path or a recorded
    parent directory of self.dir.  Information older than the configured
    interval (24h) is rejected.  Returns True when usable information
    exists, False otherwise.
    """
    db = self.db.expose(defs.DB_DIR_SESSIONS)
    # Before refusing the test outright, try to match the requested path
    # (or one of its recorded parents) against the known session paths.
    matched_dir = ''
    for known_path in db.find({}, {'path': 1, "_id": 0}).distinct('path'):
        if known_path == self.dir or self._is_dir_subpath(
                self.dir, known_path):
            matched_dir = known_path
            break
    if not matched_dir:
        utils.msg_error(
            'recipe',
            'No information about directory\n"{}"'.format(self.dir))
        return False
    # We only accept dir information no older than 24h.
    latest = db.find({
        'path': matched_dir
    }, {
        "_id": 0
    }).sort('timestamp', -1).limit(1)[0]
    if utils.create_timestamp(
    ) - latest['timestamp'] > defs.DIR_CHECK_INTERVAL:
        utils.msg_error(
            'dir',
            'Information about \n"{}" \nare older than 24h. Please refresh directory.'
            .format(self.dir))
        return False
    return True
def _prepare_file_test(self, driver, file):
    """Fetch stored information about *file* before running a test.

    For every test we need information about the file - most notably the
    dimensions and color depth used when computing compression ratio/bpp.
    Returns the newest file-info document on success, or False when the
    database holds no record of the file.  The info is also pushed into
    the compression library via set_file_info().
    """
    db = self.db.expose(defs.DB_DIR_DATA)
    file_id = utils.get_path_file(file)
    find_params = {
        'ctx.file': file_id['file'],
        'ctx.path': file_id['path']
    }
    try:
        # Newest record wins; an empty result raises IndexError on [0].
        file_info = db.find(find_params, {
            '_id': 0
        }).sort('timestamp', -1).limit(1)[0]
        self.libs[defs.COMPRESS].set_file_info(file_info)
        return file_info
    except IndexError:
        utils.msg_error(
            'file',
            'Can\'t find information about file.\n"{}"'.format(file))
        return False
def prepare_testing(self, flag):
    """Expand one testing-flag definition into concrete test variants.

    Fills self.flags['variable']['testing'] (formatted flag buffers) and
    ['testing_blueprint'] (the raw values), one entry per variant, and
    returns the number of variants prepared.  Returns False on an invalid
    range, and None when *flag* is falsy.
    NOTE(review): self.dest appears to be the output target consumed by
    self._transform_flag()/self._append() — confirm against those helpers.
    """
    if flag:
        prepared_count = 0
        self.curr_flag = self._match(flag['flag'])
        """ We can either list all options or use 'range' shortcut """
        if 'range' in flag:
            """ We need to make sure we have correct range """
            start = min(flag['range'])
            if start < 0:
                utils.msg_error(
                    'recipe',
                    'You can\'t use negative number as range.')
                return False
            end = max(flag['range'])
            """ Normalisation for range """
            for i in range(start, end + 1):
                temp_buffer = []
                self.dest = temp_buffer
                """ If we have certain modifier for counter """
                if ('step' in flag):
                    if (flag['step'] == '2n'):
                        # Deliberately rebinds the loop variable: the
                        # counter becomes 2**i for this iteration only.
                        i = 2**i
                """ We need to format 'opt' for upcoming usage """
                # 'pair' formats take the value twice (e.g. WxH-style flags).
                if (self.curr_flag['format'].find('pair') != -1):
                    self._transform_flag([i, i])
                else:
                    self._transform_flag(i)
                self._append(self.flags['variable']['testing_blueprint'], i)
                self._append(self.flags['variable']['testing'], temp_buffer)
                prepared_count += 1
        else:
            """ We don't normally call 'self._transform_flag()' without
            refreshing 'self.curr_flag' but here it creates mutual
            overriding so hotfix is to back it up """
            format_backup = self.curr_flag['format']
            for opt in flag['opts']:
                temp_buffer = []
                self.dest = temp_buffer
                if opt != None:
                    self._transform_flag(opt)
                    # Restore the format clobbered by _transform_flag().
                    self.curr_flag['format'] = format_backup
                    self._append(self.flags['variable']['testing'],
                                 temp_buffer)
                    self._append(
                        self.flags['variable']['testing_blueprint'], opt)
                else:
                    # A None option means "run without this flag at all";
                    # blueprint records False as the placeholder.
                    self._append(self.flags['variable']['testing'], None)
                    self._append(
                        self.flags['variable']['testing_blueprint'], False)
                prepared_count += 1
        return prepared_count
def parse(self):
    """Parse the recipe file, used for both tests and graph modes.

    Validates the recipe structure against self.required and self.type,
    stores the payload in self.data, and returns True on success or
    False on any validation/parse error.
    """
    if not os.path.isfile(self.path):
        utils.msg_error(
            'recipe',
            'Can\'t find recipe \n{}'.format(os.path.abspath(self.path)))
        return False
    with open(self.path) as json_file:
        try:
            data = json.load(json_file)
            # Highest level: exactly one base operation is allowed.
            base_level_keys = list(data.keys())
            base_level_keys_len = len(base_level_keys)
            if base_level_keys_len != 1:
                if base_level_keys_len == 0:
                    utils.msg_info('recipe', 'Recipe empty')
                else:
                    # FIX: message said 'more than two' but the condition
                    # triggers for anything beyond one base operation.
                    utils.msg_error(
                        'recipe',
                        'Recipe includes more than one base operation')
                return False
            # Highest level testing
            for key_item in base_level_keys:
                if key_item not in self.required.keys():
                    utils.msg_error(
                        'recipe',
                        '"{}" is not valid base operation'.format(key_item))
                    return False
            # Type of desired operation and recipe type must match
            if self.type != base_level_keys[0]:
                utils.msg_error(
                    'recipe',
                    'can\'t use "{}" recipe for "{}" mode'.format(
                        base_level_keys[0], self.type))
                return False
            # Test recipe
            if self.type == 'tests':
                for test in data['tests']:
                    test_type = test['type']
                    # Tests have two types: 'approximation' tests do not
                    # carry a final result; 'final' tests do.
                    if test_type == 'approximation':
                        test_level_keys = list(test.keys())
                        test_level_keys_len = len(test_level_keys)
                        if (test_level_keys_len < len(
                                self.required['tests']['core'])):
                            # FIX: message said 'Recipe empty' although the
                            # check detects a test missing required fields.
                            utils.msg_info(
                                'recipe',
                                'Test definition missing required fields')
                        if not os.path.isdir(test['dir']):
                            utils.msg_error(
                                'recipe',
                                'Invalid dir path {}'.format(test['dir']))
                            return False
                        routines = test['routines']
                        # A flag cannot be both the testing parameter and a
                        # fixed parameter of the same routine.
                        for routine in routines:
                            if 'testing_param' in routines[routine]:
                                testing_param = routines[routine][
                                    'testing_param']['flag']
                                if testing_param in routines[routine][
                                        'params'].keys():
                                    utils.msg_error(
                                        'recipe',
                                        'Can\'t use "{}" as a testing and regular parameter'
                                        .format(testing_param))
                                    return False
                        # Exactly one 'files' option may be enabled.
                        file_opts = test['files']
                        is_selected = None
                        for key in file_opts:
                            if file_opts[key]:
                                if is_selected:
                                    utils.msg_error(
                                        'recipe',
                                        'Too many options selected for "files", only one allowed'
                                    )
                                    return False
                                is_selected = key
                    elif test_type == 'final':
                        pass
                    else:
                        utils.msg_error(
                            'recipe',
                            'Unknown test type {}'.format(test_type))
                        return False
                self.data = data['tests']
            elif self.type == 'graph':
                self.data = data['graph']
        except ValueError:
            # FIX: original message read 'corrupted of not valid'.
            utils.msg_error(
                'recipe',
                'file corrupted or not valid \n{}'.format(
                    os.path.abspath(self.path)))
            return False
    return True