Example #1
    def replace_assigned_weight(self, fixed_json_path, name):
        """
        Find a place within the master dict called 'name' and add dates from the json file of fixed_json_path

        In some situations you may find a complex problem or a mistake and want to replace a given place rather than
        have to - rerun the whole constructor. This allows you to replace a given place by its key in the base_weights
        file you made on all your data, and a new smaller update file. The changes between the update and the master
        will be logged and then the master file will be updated.

        :param fixed_json_path: The path to the json file to load the fixed dates from
        :type fixed_json_path: Path | str

        :param name: The place key in the master _weights to load and replace dates from
        :type name: str
        """
        # Load the fix file
        fixed = load_json(fixed_json_path)

        # Create the restructured values for the named place
        key_list = self._replacement_keys(name, fixed)
        restructured = {str(year): self._replacement_values(fixed, name, year, new) for year, new in key_list}

        # Updating the existing json with the new information
        write_data = self._weights
        write_data[name] = restructured
        write_json(write_data, self._weights_path.parent, self._weights_path.stem)
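A minimal usage sketch, assuming this method belongs to the AssignWeights class mentioned in Example #2 and constructed as in Example #9; the paths and place key are illustrative:

# Hedged usage sketch; the paths and the place key are illustrative
weights = AssignWeights("output", "base_weights.txt")

# Replace only the dates for 'Manchester' from a small fix file, rather than
# rebuilding the whole base_weights file
weights.replace_assigned_weight("manchester_fix.txt", "Manchester")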
Example #2
    def __init__(self,
                 external_data_path,
                 weights_path,
                 date_max,
                 delimiter="__"):

        # Load the external data
        assert Path(
            external_data_path).exists(), "Path to external data is invalid"
        self.database = load_json(external_data_path)

        # The delimiter to access GID and the end date for weighting
        self.delimiter = delimiter
        self._user_end_date = date_max

        # Create a GID: Place lookup dict to aid extraction of data
        self.searcher = {
            place.split(self.delimiter)[0]: place
            for place in self.database.keys()
        }

        # The unique attributes from all places
        self.attributes = list({
            attr for place in self.database.keys()
            for attr in self.database[place].keys()
            if isinstance(self.database[place][attr], dict)
        })

        # The weight dates created via AssignWeights
        self._weights_dates = load_json(weights_path)

        # Output jsons of the master weighting database, as well as a non_common dict to aid in finding weight errors
        self._master = {}
        self._non_common = {
            place_name: {}
            for place_name in self._weights_dates
        }
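To make the searcher lookup concrete, here is a small hedged sketch with an invented entry; keys in the external database are assumed to follow the "<GID>__<place>" pattern the constructor's comment implies:

# Hedged sketch with an invented entry, following the assumed key pattern
database = {"E08000003__Manchester": {"Population": {}}}

searcher = {place.split("__")[0]: place for place in database.keys()}
print(searcher["E08000003"])  # -> "E08000003__Manchester"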
Example #3
    def combine_dataset(self, path_list, write_directory, database_name):
        """
        This will combine all the dataset's you have made into a single json database

        This will combine all the regional data from all standardised dataset's into a single json database. If you only
        had 1 database to begin with, then this just adds all the separate json databases into a single 1. Where it is
        mostly used, is when you have run this process on multiple dataset's and now want all the standardised places to
        share attribute data in a single database.

        :param path_list: A list of paths, where each path goes to a set directory
        :type path_list: list[str | Path]

        :param write_directory: The write directory of the master database
        :type write_directory: str | Path

        :param database_name: The master database name
        :type database_name: str

        :return: Nothing, write the database to file then stop
        :rtype: None
        """

        # Initialise the output database
        master_database = {}

        # Isolate the paths to all the files we want to load across all the databases for this geo-level
        level_data = [
            Path(path, file) for path in path_list
            for file in directory_iterator(path)
        ]

        for index, file in enumerate(level_data):
            if index % 100 == 0:
                print(f"{index}/{len(level_data)}")

            # Load the data for this file into memory and take the name to assign it under from Place_Name
            load_data = load_json(file)
            assign_name = load_data["Place_Name"]

            # Add any attribute that does not already exist for this place in the master database
            current_attributes = self._current_attributes(
                master_database, assign_name)
            for attr in load_data.keys():
                if attr not in current_attributes:
                    master_database[assign_name][attr] = load_data[attr]

        write_json(master_database, write_directory, database_name)
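A hedged usage sketch; the class name and directories are illustrative:

# Hedged usage sketch: 'Combiner' and the directories are hypothetical
combiner = Combiner()
combiner.combine_dataset(
    path_list=["standardised/census_1951", "standardised/census_1961"],
    write_directory="combined",
    database_name="MasterDatabase",
)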
Example #4
    def __init__(self, args):

        # Unpack the arguments, which arrive as a single tuple
        (load_path, label_threshold, name_index, x_index, y_index, ico_scale, text_scale, label_scale,
         text_colour) = args

        # The grouped scatter points to plot
        self.scatter_groups = load_json(load_path)

        # Cast each plotting parameter to its expected type
        self.label_threshold = float(label_threshold)
        self.name_index = int(name_index)
        self.x_index = int(x_index)
        self.y_index = int(y_index)
        self.ico_scale = float(ico_scale)
        self._text_scale = float(text_scale)
        self._label_scale = float(label_scale)
        self._text_colour = tuple_convert(text_colour)

        self._y_max = []
        self._make_point_groups()
        self.make_y_axis()
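Since every value is cast on arrival, the args tuple is presumably a flat sequence of strings (for example, values passed in from a command line); a hedged sketch of building one:

# Hedged sketch: values are strings because the constructor casts each one;
# 'ScatterPlot' and the path are hypothetical
args = ("data/scatter_groups.txt",  # load_path
        "0.5",                      # label_threshold
        "0", "1", "2",              # name_index, x_index, y_index
        "1.0", "1.0", "1.0",        # ico_scale, text_scale, label_scale
        "(1, 1, 1, 1)")             # text_colour
plot = ScatterPlot(args)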
Example #5
    def __init__(self, args):
        self.root = args

        self.x_iterator = 0
        self.y_iterator = 0
        self.background_colour = (0.05, 0.05, 0.05, 1)
        self.scale_adjust = 0.85

        # Border width is 0.1-1
        self.border_width = 0.05
        self.iterator = 2 - self.border_width

        self.data = load_json(args)
        self.bg_material = self._make_material('Background',
                                               self.background_colour)

        self._load_materials()
        for key in self.data["Process"]:
            self.construct_grid(key)
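The helper _make_material is not shown. A hedged sketch of what such a helper may do, given that the other examples fetch an 'Emission' node from each material's node tree:

import bpy

# Hedged sketch of a _make_material-style helper: build a node-based material
# whose surface is a single Emission shader set to the given RGBA colour
def make_material(name, colour):
    material = bpy.data.materials.new(name=name)
    material.use_nodes = True
    nodes = material.node_tree.nodes

    # Drop the default shader and wire an Emission node straight to the output
    nodes.clear()
    emission = nodes.new("ShaderNodeEmission")
    emission.inputs[0].default_value = colour
    output = nodes.new("ShaderNodeOutputMaterial")
    material.node_tree.links.new(emission.outputs[0], output.inputs[0])
    return material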
Example #6
def main():
    write_directory = r"I:\Work\Figures_and_tables\BIO-HGIS"
    frame_dict = load_json(r"I:\Work\BIO-HGIS\Releases\Json\GBHD.txt")

    attributes = sorted(set(flatten([list(v.keys()) for v in frame_dict.values()])))
    attributes = [attr for attr in attributes if attr != 'GID']

    # A ten-step greyscale ramp, reversed so it runs from white down to near-black
    colours = [(0.05, 0.05, 0.05, 1)] + [(0.15 + i / 10, 0.15 + i / 10, 0.15 + i / 10, 1) for i in range(8)] + [
        (1, 1, 1, 1)]
    colours = colours[::-1]

    dates = sorted(set(flatten(
        [flatten([v.keys() for k, v in value.items() if k != 'GID']) for value in frame_dict.values()])))

    obj = bpy.context.scene.objects.get('Districts')
    obj.select_set(True)
    place_dict = {material.name: material.node_tree.nodes.get('Emission') for material in obj.data.materials}

    for attr in attributes:
        print(attr)
        _make_directory(write_directory, attr)

        for d in dates:

            colour_dict, q_values = _create_colour_dict(frame_dict, attr, d, colours)
            if colour_dict:
                bpy.ops.object.select_all(action='DESELECT')

                for i, text in enumerate(q_values, 1):
                    _change_element_colour(f"Q{i}", colours[i - 1])
                    _change_element_colour(f"Q{i}T", colours[i - 1])
                    _change_text(f"Q{i}T", text)

                bpy.ops.object.select_all(action='DESELECT')
                for place, colour in colour_dict.items():
                    place_dict[place].inputs[0].default_value = colour

                bpy.context.scene.render.filepath = str(Path(write_directory, attr, f"{d}.png").absolute())
                bpy.context.scene.eevee.use_gtao = True
                bpy.context.scene.render.film_transparent = True
                bpy.ops.render.render(write_still=True)
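The legend helpers _change_element_colour and _change_text are not defined here. A hedged sketch of what they may look like, assuming each quantile element ("Q1", "Q1T", ...) is a scene object whose first material carries an Emission node, and that the "T" elements are text objects:

# Hedged sketches of the undefined legend helpers, under the assumptions above
def _change_element_colour(object_name, colour):
    obj = bpy.context.scene.objects.get(object_name)
    obj.data.materials[0].node_tree.nodes.get('Emission').inputs[0].default_value = colour


def _change_text(object_name, text):
    obj = bpy.context.scene.objects.get(object_name)
    obj.data.body = text  # text objects expose their string via data.body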
Example #7
    def _setup(working_directory, weights_path, population_weights, dates_path):
        """
        Validate paths, load files, and set the weight key and date indexes
        """

        assert Path(working_directory).exists(), "Working directory invalid"

        # Validate dates and load the csv
        assert Path(dates_path).exists(), "Dates path invalid"
        dates = CsvObject(dates_path)
        date_indexes = [index for index, head in enumerate(dates.headers) if "Changes" in head]

        # Validate weights path and load the json
        weights_path = Path(weights_path)
        assert weights_path.exists(), "Weights path invalid"
        weights = load_json(weights_path)

        # Determine the population key based on the type specified
        if population_weights:
            weight_key = "Population"
        else:
            weight_key = "Area"

        return working_directory, weights, weight_key, dates, date_indexes
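Since _setup takes no self, it is presumably a staticmethod; a hedged sketch of how a constructor might consume its return values (the class name and paths are hypothetical):

# Hedged sketch: 'WeightCalculator' and the paths are illustrative; _setup as
# defined above would sit on this class, decorated with @staticmethod
class WeightCalculator:

    def __init__(self, working_directory, weights_path, population_weights, dates_path):
        (self._working_dir, self._weights, self._weight_key, self._dates,
         self._date_indexes) = self._setup(working_directory, weights_path, population_weights, dates_path)

# population_weights=True selects the "Population" weight key, False selects "Area"
calc = WeightCalculator("output", "base_weights.txt", True, "dates.csv")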
Example #8
from miscSupports import load_json
from pathlib import Path
import bpy

frame_dict = load_json(
    r"C:\Users\Samuel\PycharmProjects\pyBlendFigures\TestV2\Map2\Test2\UE_Values.txt"
)

write_directory = r"I:\Work\Figures_and_tables\Depreivation indexes\UEOverTime"

for frame_id, frame_place_values in frame_dict.items():

    for index, (place, colour) in enumerate(frame_place_values.items()):
        print(f"F{frame_id}: {index}/{len(frame_place_values)}")

        # Deselect any objects
        bpy.ops.object.select_all(action='DESELECT')

        # Isolate the current object
        obj = bpy.context.scene.objects.get(place)
        obj.select_set(True)

        # Fetch each material's Emission node and set its colour to the required value
        for mat in obj.data.materials:
            emission = mat.node_tree.nodes.get('Emission')
            emission.inputs[0].default_value = colour

    bpy.context.scene.render.filepath = str(
        Path(write_directory, f"{frame_id}.png").absolute())
    bpy.ops.render.render(write_still=True)
Example #9
    def __init__(self, working_directory, weights_path):
        self._working_dir = working_directory
        self._weights_path = Path(weights_path)
        assert self._weights_path.exists(), "Path to weights is invalid"
        self._weights = load_json(self._weights_path)