def import_script(script_name=None, level_name=None): # get name of script file if not script_name: #if script_file not defined while True: print "Please enter the name of the script file." print "The current working directory is " print print os.getcwd() script_name = raw_input("Path to .6vscript file: ") if not script_name: print "You must specify a script to import." continue else: try: with open(script_name): pass except IOError: print 'File not found.' else: break print # Checks whether level_name specified beforehand (for quiet execution) while not level_name: print "Please enter the filename of the level" print "(do not include .vvvvvv or else bad things will happen)" level_name = utils.get_level_name() if not level_name: print "You must enter a level name" # backup level file print "Backing up level file..." backup_file = utils.level_backup(level_name) print "Backup saved to " + backup_file # get raw level data from file level_data = utils.get_raw_data(utils.get_vvvvvv_dir(), level_name) # get raw script data from file raw_script_data = utils.get_script_filedata(script_name) # convert script data to raw data script_data = utils.script_to_raw(raw_script_data) if not script_data: raise IOError # Adding script data to level data in memory utils.import_script_data(level_data, script_data) # going hot! success = utils.write_level_data(utils.get_vvvvvv_dir(), level_name, level_data) if success: print "File successfully written." else: print "An error occurred when writing the file."
def _get_levels_metadata(self, raw_number_records, duration):
    """Gets level meta information for each level.

    Builds one metadata entry per downsampling level: level 0 covers the
    raw records, and each subsequent level holds the previous record count
    divided by the downsample factor, stopping once a level would fall
    below the configured minimum.

    Args:
        raw_number_records: An int that represents the number of raw
            records.
        duration: An int that represents duration of the power test which
            produced the DMM power data.

    Returns:
        A tuple of length 2, that contains level meta info object and
        level names.
    """
    assert self._number_per_slice > 0
    assert self._downsample_level_factor > 1

    levels = []
    level_names = []

    number_records = raw_number_records
    level_index = 0
    # Level 0 is always emitted; deeper levels only while they stay at or
    # above the minimum record count.
    while level_index == 0 or number_records >= self._minimum_number_level:
        frequency = number_records / duration
        level_name = utils.get_level_name(level_index)
        number_slices = ceil(number_records / self._number_per_slice)
        # slice_index renamed from the original's inner "index" so it no
        # longer shadows the loop counter.
        slice_names = [
            '/'.join([level_name, utils.get_slice_name(slice_index)])
            for slice_index in range(number_slices)
        ]
        level = {
            "names": slice_names,
            "frequency": frequency,
            "number": number_records
        }
        levels.append(level)
        level_names.append(level_name)
        level_index += 1
        number_records = number_records // self._downsample_level_factor
    return levels, level_names
def fetch(self, strategy, number_records, timespan_start, timespan_end):
    """Gets the records in given timespan, downsample the fetched data
    with given strategy if needed.

    Read the records and downsample the records to be within
    number_records. First we search the level that has frequency the least
    higher than the required frequency. Then find the first and last slice
    for the given time span. Since records are sorted, first and last
    slices are found by binary search, then all slices in between are
    selected and downsampled to return.

    Args:
        strategy: A string representing a downsampling strategy.
        number_records: An integer representing number of records to
            return.
        timespan_start: An integer representing the timestamp in
            microseconds of the start of timespan.
        timespan_end: An integer representing the timestamp in
            microseconds of the end of timespan.

    Returns:
        A tuple of (downsampled data in the given file, precision for this
        result). Example of the data:
        [
            {'name': 'sys', 'data': [[time, power], [time, power]]},
            {'name': 'channel2', 'data': [[time, power]]}
        ]
    """
    self._metadata = Metadata(
        self._preprocess_dir, bucket=self._preprocess_bucket)
    self._metadata.load()

    # Default any omitted bound to the full preprocessed range.
    if timespan_start is None:
        timespan_start = self._metadata['start']
    if timespan_end is None:
        timespan_end = self._metadata['end']
    if (timespan_start > self._metadata['end']
            or timespan_end < self._metadata['start']):
        # Requested span does not overlap the data at all. Return the same
        # (data, precision) shape as the success path so callers can always
        # unpack the result. (Previously returned a bare [], which broke
        # tuple-unpacking callers.)
        return [], 0

    required_frequency = number_records / (timespan_end - timespan_start)

    # Finds Downsample Level: the one whose frequency is the least value
    # still higher than required_frequency.
    target_level_index = self._binary_search([
        self._metadata['levels'][level_name]['frequency']
        for level_name in self._metadata['levels']['names']
    ], required_frequency, True)
    target_level = self._metadata['levels'][
        self._metadata['levels']['names'][target_level_index]]
    level_metadata = Metadata(self._preprocess_dir, self._preprocess_bucket,
                              strategy,
                              utils.get_level_name(target_level_index))
    level_metadata.load()

    # Records are sorted, so the bounding slices are found by binary
    # search; everything in between belongs to the span.
    first_slice = self._binary_search([
        level_metadata[single_slice]
        for single_slice in target_level['names']
    ], timespan_start)
    last_slice = self._binary_search([
        level_metadata[single_slice]
        for single_slice in target_level['names']
    ], timespan_end)
    target_slices_names = target_level['names'][first_slice:last_slice + 1]
    target_slice_paths = [
        utils.get_slice_path(self._preprocess_dir,
                             utils.get_level_name(target_level_index),
                             single_slice, strategy)
        for single_slice in target_slices_names
    ]

    # Reads records and downsamples.
    target_slices = LevelSlicesReader(target_slice_paths,
                                      self._preprocess_bucket)
    target_slices.read(timespan_start, timespan_end)
    number_target_records = target_slices.get_records_count()
    target_slices.downsample(strategy, max_records=number_records)
    downsampled_data = target_slices.format_response()
    number_result_records = target_slices.get_records_count()

    # Precision: fraction of in-span records kept, scaled by how much this
    # level was already downsampled relative to the raw recording.
    if number_target_records == 0:
        precision = 0
    else:
        precision = (number_result_records / number_target_records *
                     (target_level['number'] / self._metadata['raw_number']))
    return downsampled_data, precision
def extract(level_name=None, save_file=None): # Initializing variables filedata = "" script_data = None vvvvvv_dir = None # Get current opsys vvvvvv_dir = utils.get_vvvvvv_dir() # Checks whether level_name specified beforehand (for quiet execution) if not level_name: # request filename from user while True: level_name = None level_name = utils.get_level_name() if not level_name: print "You must enter a level name" continue # get level data raw_data = utils.get_raw_data(vvvvvv_dir, level_name) if not raw_data: print "Error: level does not exist" continue else: break else: raw_data = utils.get_raw_data(vvvvvv_dir, level_name) # get script data script_data = utils.get_script_data(raw_data) if not script_data: print "No script found" quit() final_data = utils.cleanup_data(script_data) print "Done!" # checks if save_file specified beforehand (for quiet execution) if not save_file: cwd = os.getcwd() print print "What file do you wish me to save the data to?" print "Current working directory is: " print print cwd print print "You may enter a filename to save in current directory," print "enter a relative path, or a full path." print print "Else, press return to accept the default, which is: " print print level_name + ".6vscript" print save_file = raw_input("Save file: ") if not save_file: save_file = level_name + ".6vscript" else: pass with open(save_file, 'w') as outfile: for line in final_data: outfile.write(line + '\n') print save_file + " written"