def get_bias_and_variance_from_files(self):
    bias = array(load_from_text_file(
        os.path.join(self.output_directory, self.bias_file_name),
        convert_to_float=True))
    variance = array(load_from_text_file(
        os.path.join(self.output_directory, self.variance_file_name),
        convert_to_float=True))
    if variance.ndim == 1:
        # reshape returns a new array; the original code discarded the result
        variance = variance.reshape(variance.size, 1)
    ahat = {}
    v = {}
    # One bias value and one variance row per variable.
    for l in range(bias.size):
        ahat[l] = bias[l]
        v[l] = variance[l, :]
    return (ahat, v)
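# Example of the expected round trip (file contents are hypothetical): given
# a bias file "0.5\n-1.2\n" and a variance file "0.04 0.01\n0.09 0.02\n" in
# self.output_directory, the method is expected to return
#   ahat == {0: 0.5, 1: -1.2}
#   v    == {0: array([0.04, 0.01]), 1: array([0.09, 0.02])}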
def __init__(self, filename, package_order=['core'], additional_datasets={},
             cache_file_location=None, transformation_pair=(None, None), **kwargs):
    """The file 'filename' has the following structure:
    1. line: base_year calibration_year
    two lines per variable:
        1. variable_name
        2. bias variance
    """
    content = load_from_text_file(filename)
    nvar = (content.size - 1) // 2  # two lines per variable after the header line
    self.base_year, self.year = map(int, content[0].split(' '))
    self.variable_names = []
    self.ahat = {}
    self.v = {}
    self.weight_components = {}
    self.package_order = package_order
    counter = 1
    for i in range(nvar):
        self.variable_names.append(content[counter])
        counter += 1
        split_row = content[counter].split(' ')
        self.ahat[i], self.v[i] = map(lambda x: array([float(x)]), split_row)
        self.weight_components[i] = array([1.])
        counter += 1
    self.weights = array([1.])
    self.number_of_runs = 1
    self.additional_datasets = additional_datasets
    if cache_file_location is not None:
        BayesianMelding.set_cache_attributes(self, cache_file_location, **kwargs)
    self.transformation_pair_for_prediction = transformation_pair
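# For illustration, an input file in the format described by the docstring
# might look like this (variable names and numbers are hypothetical):
#
#   2000 2005
#   urbansim.zone.population
#   0.5 0.04
#   urbansim.zone.number_of_jobs
#   -1.2 0.09
#
# Here base_year is 2000, the calibration year is 2005, and there are two
# variables, each followed by one "bias variance" line.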
def __init__(self, filename, package_order=['core'], additional_datasets={}):
    """The file 'filename' has the following structure:
    1. line: base_year calibration_year
    two lines per variable:
        1. variable_name
        2. bias variance
    """
    content = load_from_text_file(filename)
    nvar = (content.size - 1) // 2  # two lines per variable after the header line
    self.base_year, self.year = map(int, content[0].split(' '))
    self.variable_names = []
    self.ahat = {}
    self.v = {}
    self.weight_components = {}
    self.package_order = package_order
    counter = 1
    for i in range(nvar):
        self.variable_names.append(content[counter])
        counter += 1
        split_row = content[counter].split(' ')
        self.ahat[i], self.v[i] = map(lambda x: array([float(x)]), split_row)
        self.weight_components[i] = array([1.])
        counter += 1
    self.weights = array([1.])
    self.number_of_runs = 1
    self.additional_datasets = additional_datasets
def convert_file(self, file_directory, file_name, output_directory):
    file_path = os.path.join(file_directory, file_name)
    file_stem, extension = os.path.splitext(file_name)
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)
    if extension in self.old_to_new_extension_mapping_for_binary_files():
        # Binary file: copy it under the same stem with the new extension.
        new_extension = self.old_to_new_extension_mapping_for_binary_files()[extension]
        new_file_path = os.path.join(output_directory, "%s%s" % (file_stem, new_extension))
        copyfile(file_path, new_file_path)
    elif extension == ".txt":
        # Text file: load it and rewrite it in the flt storage format.
        data = load_from_text_file(file_path)
        numpy_array = numpy.array(data)
        storage = file_flt_storage.storage_file(None)
        storage._write_to_file(output_directory, file_stem, numpy_array)
    else:
        # Anything else is copied through unchanged.
        copyfile(file_path, os.path.join(output_directory, file_name))
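# Hedged usage sketch (the paths and the owning class name are assumptions):
#
#   converter = CacheConverter()  # hypothetical class defining convert_file
#   src = '/urbansim_cache/2000/gridcells'
#   dst = '/urbansim_cache_new/2000/gridcells'
#   for name in os.listdir(src):
#       converter.convert_file(src, name, dst)
#
# Binary files are renamed via the extension mapping, '.txt' files are
# rewritten as flt storage, and everything else is copied through.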
def get_urbansim_last_year(self, config):
    # Run a script on the remote host that writes the last simulated year to
    # a text file, then fetch that file and read the year back locally.
    self.run_remote_python_process(
        '%s/%s/write_last_urbansim_year.py' % (self.remote_opus_path, self.script_path),
        '-d %s -o %s/last_year.txt' % (config['cache_directory'], self.remote_communication_path))
    self.copy_file_from_remote_host('last_year.txt', self.local_output_path)
    return load_from_text_file(
        '%s/last_year.txt' % self.local_output_path, convert_to_float=True)[0]
def _set_cache_set(self, filename):
    self.cache_set = load_from_text_file(filename)
    self.full_cache_file_name = filename
    logger.log_status('Multiple Runs consist of %s runs (loaded from %s)' % (
        self.cache_set.size, filename))
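# The cache-set file is assumed to be a plain-text listing with one run
# cache directory per line, e.g. (paths are hypothetical):
#
#   /urbansim_cache/run_1.2006_05_10
#   /urbansim_cache/run_2.2006_05_11
#
# load_from_text_file then yields an array whose size is the number of runs.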
def get_weights_from_file(self):
    # 'filename' avoids shadowing the builtin 'file'.
    filename = os.path.join(self.output_directory, self.weights_file_name)
    if not os.path.exists(filename):
        raise StandardError("Directory %s must contain a file '%s'. Use method 'compute_weights'." % (
            self.output_directory, self.weights_file_name))
    return array(load_from_text_file(filename, convert_to_float=True))