def generate_cache_for_all_grid_size(class_, layer):
    """Regenerate the cluster caches for *layer* at every configured grid size.

    Iterates ``class_.GRID_SIZES`` and delegates the actual cache build to
    ``class_.generate_cache_clusters`` for each size, flushing the DB session
    after each grid size so pending rows are written out incrementally.

    NOTE(review): the original docstring claimed only species with
    out-of-date caches and at least ``cache_occurrence_clusters_threshold``
    records are processed, but no such check exists in this function — it
    regenerates unconditionally. Presumably that filtering is done by the
    caller; confirm before relying on it.

    :param class_: class providing ``GRID_SIZES`` and
        ``generate_cache_clusters`` (implicit-classmethod calling style).
    :param layer: the layer (species) whose caches are regenerated.
    """
    log = logging.getLogger(__name__)
    log.debug("Generating cache for all grid sizes")
    for grid_size in class_.GRID_SIZES:
        class_.generate_cache_clusters(layer, grid_size)
        # Flush per grid size to keep the session's pending set small.
        DBSession.flush()
    log.debug("Finished generating cache for all grid sizes")
def generate_cache_clusters(class_, layer, grid_size):
    """Build and persist the point-cluster cache for one grid size of *layer*.

    Creates a new ``CacheRecord`` for *grid_size*, attaches it to the layer,
    then materialises every gridded cluster (centroid WKT plus cluster size)
    of the layer's mappable points into ``CachedMappablePointCluster`` rows.
    Progress is logged (and the session flushed) every 10,000 clusters.

    :param class_: class providing ``CacheRecord`` and
        ``CachedMappablePointCluster`` (implicit-classmethod calling style).
    :param layer: the layer whose points are being clustered.
    :param grid_size: the grid resolution to cluster at.
    """
    log = logging.getLogger(__name__)
    log.debug("Generating cache for grid size: %s", grid_size)

    record = class_.CacheRecord(grid_size)
    layer.cache_records.append(record)
    DBSession.flush()

    query = GriddedAndBoundMappablePoint.get_points_as_wkt(
        layer, grid_size=grid_size
    )
    clusters = query.filter(MappablePoint.layer_id == layer.id)

    for count, cluster in enumerate(clusters, start=1):
        # Locations are deliberately not cached (left disabled in the
        # original implementation).
        cached = class_.CachedMappablePointCluster(
            cluster.cluster_size, cluster.centroid
        )
        record.cached_mappable_point_clusters.append(cached)
        if count % 10000 == 0:
            log.debug("Up to cluster: %i", count)
            # Periodic flush bounds session memory on very large layers;
            # the remainder is flushed below.
            DBSession.flush()

    DBSession.flush()