def construct_noise(name, modelParams, sigma=0.05, sigma_age=0.02):
    """Constructs an additive random-walk noise term for the reproduction number.

    Samples a per-country noise amplitude and a per-country-per-age-group
    noise amplitude (both HalfNormal), draws standard-normal increments for
    every simulated time step, scales them by those amplitudes, and returns
    the exclusive cumulative sum of the combined increments.

    Parameters
    ----------
    name : str
        Base name for the distributions added to the trace.
    modelParams : :py:class:`covid19_npis.ModelParams`
        Provides ``num_countries``, ``num_age_groups`` and ``length_sim``.
    sigma : float, optional
        Scale of the per-country amplitude prior. |default| 0.05
    sigma_age : float, optional
        Scale of the per-age-group amplitude prior. |default| 0.02

    Returns
    -------
    :
        Cumulated noise tensor.
        |shape| batch, time, country, age_group
    """
    # Per-country noise amplitude: sigma_country ~ HalfNormal(sigma).
    noise_R_sigma = yield HalfNormal(
        name=f"{name}_sigma",
        scale=sigma,
        conditionally_independent=True,
        event_stack=(modelParams.num_countries,),
        shape_label=("country"),
        transform=transformations.SoftPlus(),
    )
    # Per-(country, age_group) noise amplitude: ~ HalfNormal(sigma_age).
    noise_R_sigma_age = yield HalfNormal(
        name=f"{name}_sigma_age",
        scale=sigma_age,
        conditionally_independent=True,
        event_stack=(modelParams.num_countries, modelParams.num_age_groups),
        shape_label=("country", "age_group"),
        transform=transformations.SoftPlus(),
    )
    # Standard-normal increments per (time, country), scaled by the country
    # amplitude; the [..., newaxis, :] aligns sigma's country axis past a
    # broadcast time axis.
    noise_R = (
        yield Normal(
            name=f"{name}",
            loc=0.0,
            scale=1.0,
            event_stack=(modelParams.length_sim, modelParams.num_countries,),
            shape_label=("time", "country"),
            conditionally_independent=True,
        )
    ) * noise_R_sigma[..., tf.newaxis, :]
    # Standard-normal increments per (time, country, age_group), scaled by
    # the age-group amplitude.
    noise_R_age = (
        yield Normal(
            name=f"{name}_age",
            loc=0.0,
            scale=1.0,
            event_stack=(
                modelParams.length_sim,
                modelParams.num_countries,
                modelParams.num_age_groups,
            ),
            shape_label=("time", "country", "age_group"),
            conditionally_independent=True,
        )
    ) * noise_R_sigma_age[..., tf.newaxis, :, :]
    # Broadcast country-level increments across age groups and accumulate.
    # NOTE(review): after broadcasting, the trailing axes are
    # (time, country, age_group), so axis=-2 cumulates over *country*, not
    # time. A random walk over time would use axis=-3 — confirm intent.
    sum_noise_R = tf.math.cumsum(
        noise_R[..., tf.newaxis] + noise_R_age, exclusive=True, axis=-2
    )
    return sum_noise_R
def construct_C(name, modelParams, mean_C=-0.5, sigma_C=1, sigma_country=0.5, sigma_age=0.5):
    """Constructs the age-group interaction matrix C hierarchically.

    Samples a global base value plus non-centered country and age-group-pair
    offsets for the off-diagonal (pre-sigmoid) entries, squashes them with a
    sigmoid, clips them below 1, expands them to a matrix via
    ``_subdiagonal_array_to_matrix`` (project helper — presumably fills the
    off-diagonal entries; confirm), adds the identity for the diagonal, and
    normalizes with ``normalize_matrix``. Both the per-country matrix and a
    country-independent mean matrix are added to the trace.

    Parameters
    ----------
    name : str
        Base name for the distributions added to the trace.
    modelParams : :py:class:`covid19_npis.ModelParams`
        Provides ``num_countries`` and ``num_age_groups``.
    mean_C : float, optional
        Prior mean of the base (pre-sigmoid) entries. |default| -0.5
    sigma_C : float, optional
        Prior scale of the base entries. |default| 1
    sigma_country : float, optional
        Scale of the country-offset hyperprior. |default| 0.5
    sigma_age : float, optional
        Scale of the age-group-pair-offset hyperprior. |default| 0.5

    Returns
    -------
    :
        C matrix
        |shape| batch, country, age_group_i, age_group_j
    """
    # Hyperpriors for the spread of the country / age-group-pair offsets.
    C_country_sigma = yield HalfNormal(
        name=f"{name}_country_sigma",
        scale=sigma_country,
        conditionally_independent=True,
        event_stack=(1, 1),
    )
    C_age_sigma = yield HalfNormal(
        name=f"{name}_age_sigma",
        scale=sigma_age,
        conditionally_independent=True,
        event_stack=(1, 1),
    )
    # Non-centered parameterization: standard normal scaled by hyperprior.
    Delta_C_country = (yield Normal(
        name=f"Delta_{name}_country",
        loc=0,
        scale=1,
        conditionally_independent=True,
        event_stack=(modelParams.num_countries, 1),
        shape_label=("country", None),
    )) * C_country_sigma
    # One offset per unordered pair of distinct age groups: n*(n-1)/2 terms.
    Delta_C_age = (yield Normal(
        name=f"Delta_{name}_age",
        loc=0,
        scale=1,
        conditionally_independent=True,
        event_stack=(
            1,
            modelParams.num_age_groups * (modelParams.num_age_groups - 1) // 2,
        ),
        shape_label=(None, "age groups cross terms"),
    )) * C_age_sigma
    # Shared base value shifted by mean_C (non-centered).
    Base_C = (yield Normal(
        name=f"Base_{name}",
        loc=0,
        scale=sigma_C,
        conditionally_independent=True,
        event_stack=(1, 1),
    )) + mean_C
    C_array = Base_C + Delta_C_age + Delta_C_country
    # Map to (0, 1) so off-diagonal entries stay positive and bounded.
    C_array = tf.math.sigmoid(C_array)
    C_array = tf.clip_by_value(
        C_array, 0,
        0.99)  # ensures off diagonal terms are smaller than diagonal terms
    size = modelParams.num_age_groups
    # Expand pair values to a matrix, set diagonal via identity, normalize.
    transf_array = lambda arr: normalize_matrix(
        _subdiagonal_array_to_matrix(arr, size) + tf.linalg.eye(
            size, dtype=arr.dtype))
    C_matrix = transf_array(C_array)
    # Add the per-country matrix to the trace.
    C_matrix = yield Deterministic(
        name=f"{name}",
        value=C_matrix,
        shape_label=("country", "age_group_i", "age_group_j"),
    )
    # Also trace the country-independent mean matrix (no country offsets).
    yield Deterministic(
        name=f"{name}_mean",
        value=transf_array(tf.math.sigmoid(Base_C + Delta_C_age))[..., 0, :, :],
        shape_label=("age_group_i", "age_group_j"),
    )
    return C_matrix
def construct_E_0_t(
    modelParams,
    len_gen_interv_kernel,
    R_t,
    mean_gen_interv,
    mean_test_delay=10,
):
    r"""
    Generates a prior for E_0_t, based on the observed number of cases during
    the first 5 days. Currently it is implemented to take the first value of
    R_t, and multiply the inverse of R_t with first observed values until the
    begin of the simulation is reached. This is then used as a prior for a
    lognormal distribution which set the E_0_t.

    Parameters
    ----------
    modelParams: :py:class:`covid19_npis.ModelParams`
        Instance of modelParams, mainly used for number of age groups and
        number of countries.
    len_gen_interv_kernel: number
        ...some description
    R_t: tf.tensor
        Time dependent reproduction number tensor :math:`R(t)`.
        |shape| time, batch, country, age group
    mean_gen_interv: countries
        ...some description (currently unused inside this function)
    mean_test_delay: number, optional
        ...some description
        |default| 10

    Returns
    -------
    :
        E_0_t: some description
        |shape| time, batch, country, age_group
    """
    # NOTE(review): batch_dims is computed but never used below.
    batch_dims = tuple(R_t.shape)[:-3]

    # Observed positive tests; expected layout (time, country, age_group)
    # given the slicing below — confirm against ModelParams.
    data = modelParams.pos_tests_data_array
    assert data.ndim == 3
    assert (modelParams.offset_sim_data >= len_gen_interv_kernel +
            mean_test_delay), "min_offset_sim_data is to small"
    i_data_begin_list = modelParams.indices_begin_data
    # Simulation starts this many steps before the first data point.
    i_sim_begin_list = i_data_begin_list - len_gen_interv_kernel - mean_test_delay
    # eigvals, _ = tf.linalg.eigh(R_t[..., i_data_begin, :, :])
    # largest_eigval = eigvals[-1]
    # Per-day growth factor: fifth root of R (5 presumably relates to the
    # generation interval — confirm). Inverse is clipped for stability.
    R_t_rescaled = R_t**(1 / 5.0)
    R_inv = 1 / R_t_rescaled
    R_inv = tf.clip_by_value(R_inv, clip_value_min=0.7, clip_value_max=1.2)
    """
    R = R_t_rescaled[0]
    R_sqrt = tf.math.sqrt(R)
    R_diag = tf.linalg.diag(R_sqrt)
    R_eff = R_diag @ C @ R_diag
    log.debug(f"R_eff for h_0_t construction {R_eff.shape}:\n{R_eff}")
    R_eff_inv = tf.linalg.pinv(R_eff)
    log.debug(f"R_eff_inv for h_0_t construction:\n{R_eff_inv}")
    """
    # Mean observed cases over the first 5 days of data, per country.
    avg_cases_begin = []
    for c in range(data.shape[1]):
        avg_cases_begin.append(
            np.nanmean(data[i_data_begin_list[c]:i_data_begin_list[c] + 5, c],
                       axis=0))
    avg_cases_begin = np.array(avg_cases_begin)
    E_t = tf.convert_to_tensor(avg_cases_begin)
    log.debug(f"avg_cases_begin:\n{avg_cases_begin}")
    # Permutations that move the country axis to the front (perm_forw) so
    # tf.gather(..., batch_dims=1) can pick a per-country time index, and
    # back again (perm_back). Chosen by the rank of R_t.
    if len(R_t.shape) == 5:
        perm_forw = (3, 0, 1, 2, 4)
        perm_back = (1, 2, 3, 0, 4)
    elif len(R_t.shape) == 4:
        perm_forw = (2, 0, 1, 3)
        perm_back = (1, 2, 0, 3)
    elif len(R_t.shape) == 3:
        perm_forw = (1, 0, 2)
        perm_back = (1, 0, 2)
    else:
        raise RuntimeError("Unknown rank")
    """
    for i in range(diff_sim_data - len_gen_interv_kernel - mean_test_delay):
        R_current = tf.transpose(
            tf.gather(
                tf.transpose(R_inv, perm=perm_forw),
                tf.constant(i_data_begin_list - i)[:, tf.newaxis],
                axis=1,
                batch_dims=1,
            ),
            perm=perm_back,
        )[
            0
        ]  # A little complicated expression, because tensorflow doesn't allow advanced numpy indexing
        E_t = R_current * E_t
        log.debug(f"i, E_t:{i}\n{E_t}")
    """
    # Walk E_t backwards in time with the clipped inverse growth factor,
    # recording one mean value per kernel position (filled back-to-front).
    E_0_t_mean = [None for _ in range(len_gen_interv_kernel - 1, -1, -1)]
    R_inv_transposed = tf.transpose(R_inv, perm=perm_forw)
    for i in range(len_gen_interv_kernel - 1, -1, -1):
        # R = tf.gather(R_t_rescaled, i_sim_begin_list + i, axis=-3, batch_dims=1,))
        # E_t = tf.linalg.matvec(R_eff_inv, E_t)
        R_current = tf.transpose(
            tf.gather(
                R_inv_transposed,
                tf.constant(i_data_begin_list - i - mean_test_delay)[:, tf.newaxis],
                axis=1,
                batch_dims=1,
            ),
            perm=perm_back,
        )[0]  # A little complicated expression, because tensorflow doesn't allow advanced numpy indexing
        E_t = R_current * E_t
        log.debug(f"i, E_t:{i}\n{E_t}")
        E_0_t_mean[i] = E_t
    E_0_t_mean = tf.stack(E_0_t_mean, axis=-3)
    # Keep the prior mean strictly positive and bounded.
    E_0_t_mean = tf.clip_by_value(E_0_t_mean, 1e-5, 1e6)
    log.debug(f"E_0_t_mean:\n{E_0_t_mean}")
    # Multiplicative lognormal-style perturbation of the first kernel slot.
    E_0_diff_base = yield Normal(
        name="E_0_diff_base",
        loc=0.0,
        scale=3.0,
        conditionally_independent=True,
        event_stack=tuple(E_0_t_mean[..., 0:1, :, :].shape[-3:]),
    )
    E_0_base = E_0_t_mean[..., 0:1, :, :] * tf.exp(E_0_diff_base)
    # Perturb the successive differences of the mean, then cumulatively sum
    # so the sampled E_0_t follows the shape of E_0_t_mean.
    E_0_mean_diff = E_0_t_mean[..., 1:, :, :] - E_0_t_mean[..., :-1, :, :]
    E_0_diff_add = yield Normal(
        name="E_0_diff_add",
        loc=0.0,
        scale=1.0,
        conditionally_independent=True,
        event_stack=tuple(E_0_mean_diff.shape[-3:]),
    )
    E_0_base_add = E_0_mean_diff * tf.exp(E_0_diff_add)
    log.debug(f"E_0_base:\n{E_0_base}")
    log.debug(f"E_0_base_add:\n{E_0_base_add}")
    log.debug(f"R_t:\n{R_t.shape}")
    E_0_t_rand = tf.math.cumsum(
        tf.concat(
            [
                E_0_base,
                E_0_base_add,
            ],
            axis=-3,
        ),
        axis=-3,
    )  # shape: batch_dims x len_gen_interv_kernel x countries x age_groups
    E_0_t_rand = tf.einsum(
        "...kca->k...ca", E_0_t_rand
    )  # Now: shape: len_gen_interv_kernel x batch_dims x countries x age_groups
    log.debug(f"E_0_t_rand:\n{E_0_t_rand}")
    # Embed each country's kernel values into the full simulation timeline:
    # zeros before its simulation start, the kernel values, zeros after.
    E_0_t = []
    batch_shape = R_t.shape[1:-2]
    log.debug(f"batch_shape:\n{batch_shape}")
    total_len = R_t.shape[0]
    age_shape = R_t.shape[-1:]
    for i, i_begin in enumerate(i_sim_begin_list):
        E_0_t.append(
            tf.concat(
                [
                    tf.zeros((i_begin, ) + batch_shape + (1, ) + age_shape),
                    E_0_t_rand[..., i:i + 1, :],
                    tf.zeros((total_len - len_gen_interv_kernel - i_begin, ) +
                             batch_shape + (1, ) + age_shape),
                ],
                axis=0,
            ))
    # Concatenate the per-country columns along the country axis.
    E_0_t = tf.concat(E_0_t, axis=-2)
    return E_0_t
def _create_distributions(modelParams):
    r"""
    Returns a dict of distributions for further processing/sampling with the
    following priors:

    .. math::

        \alpha^\dagger_i &\sim \mathcal{N}\left(-1, 2\right)\quad \forall i,\\
        \Delta \alpha^\dagger_c &\sim \mathcal{N}\left(0, \sigma_{\alpha, \text{country}}\right) \quad \forall c, \\
        \Delta \alpha^\dagger_a &\sim \mathcal{N}\left(0, \sigma_{\alpha, \text{age}}\right)\quad \forall a, \\
        \sigma_{\alpha, \text{country}} &\sim HalfNormal\left(0.1\right),\\
        \sigma_{\alpha, \text{age}} &\sim HalfNormal\left(0.1\right)

    .. math::

        l^\dagger_{\text{positive}} &\sim \mathcal{N}\left(3, 1\right),\\
        l^\dagger_{\text{negative}} &\sim \mathcal{N}\left(5, 2\right),\\
        \Delta l^\dagger_i &\sim \mathcal{N}\left(0,\sigma_{l, \text{interv.}} \right)\quad \forall i,\\
        \sigma_{l, \text{interv.}}&\sim HalfNormal\left(1\right)

    .. math::

        \Delta d_i  &\sim \mathcal{N}\left(0, \sigma_{d, \text{interv.}}\right)\quad \forall i,\\
        \Delta d_c &\sim \mathcal{N}\left(0, \sigma_{d, \text{country}}\right)\quad \forall c,\\
        \sigma_{d, \text{interv.}}  &\sim HalfNormal\left(0.3\right),\\
        \sigma_{d, \text{country}} &\sim HalfNormal\left(0.3\right)

    Parameters
    ----------
    modelParams: :py:class:`covid19_npis.ModelParams`
        Instance of modelParams, mainly used for number of age groups and
        number of countries.

    Return
    ------
    :
        interventions, distributions
    """
    log.debug("_create_distributions")

    # --- Delta-alpha cross terms per country / age group, with hyperpriors ---
    sigma_alpha_c = HalfNormal(
        name="alpha_sigma_country",
        scale=0.1,
        transform=transformations.SoftPlus(scale=0.1),
        conditionally_independent=True,
    )
    sigma_alpha_a = HalfNormal(
        name="alpha_sigma_age_group",
        scale=0.1,
        transform=transformations.SoftPlus(scale=0.1),
        conditionally_independent=True,
    )
    # The sigma hyperpriors get multiplied onto the deltas later
    # (see construct_R_t), hence the unit-scale normals here.
    d_alpha_cross_country = Normal(
        name="delta_alpha_cross_c",
        loc=0.0,
        scale=1.0,
        event_stack=(1, modelParams.num_countries, 1),  # intervention country age_group
        shape_label=(None, "country", None),
        conditionally_independent=True,
    )
    d_alpha_cross_age = Normal(
        name="delta_alpha_cross_a",
        loc=0.0,
        scale=1.0,
        event_stack=(
            1,
            1,
            modelParams.num_age_groups,
        ),  # intervention country age_group
        shape_label=(None, None, "age_group"),
        conditionally_independent=True,
    )
    alpha_cross_interv = Normal(
        name="alpha_cross_i",
        loc=-1.0,  # See publication for reasoning behind -1 and 2
        scale=2.0,
        conditionally_independent=True,
        event_stack=(
            modelParams.num_interventions,
            1,
            1,
        ),  # intervention country age_group
        shape_label=("intervention", None, None),
    )

    # --- l distributions (transient lengths) ---
    sigma_l_interv = HalfNormal(
        name="l_sigma_interv",
        scale=1.0,
        transform=transformations.SoftPlus(),
        conditionally_independent=True,
    )
    d_l_cross_interv = Normal(
        name="delta_l_cross_i",
        loc=0.0,
        scale=1.0,
        conditionally_independent=True,
        event_stack=(modelParams.num_interventions, ),
        shape_label=("intervention"),
    )
    log.debug(f"l_sigma_interv\n{sigma_l_interv}")
    # Delta-l cross terms were created in the intervention class (see above).
    l_pos_cross = Normal(
        name="l_positive_cross",
        loc=3.0,
        scale=1.0,
        conditionally_independent=True,
        event_stack=(1, ),
    )
    l_neg_cross = Normal(
        name="l_negative_cross",
        loc=5.0,
        scale=2.0,
        conditionally_independent=True,
        event_stack=(1, ),
    )

    # --- date d distributions (intervention onset shifts) ---
    sigma_d_interv = HalfNormal(
        name="d_sigma_interv",
        scale=0.3,
        transform=transformations.SoftPlus(scale=0.3),
        conditionally_independent=True,
    )
    sigma_d_country = HalfNormal(
        name="d_sigma_country",
        scale=0.3,
        transform=transformations.SoftPlus(scale=0.3),
        conditionally_independent=True,
    )
    d_d_interv = Normal(
        name="delta_d_i",
        loc=0.0,
        scale=1.0,
        event_stack=(modelParams.num_interventions, 1, 1),
        shape_label=("intervention", None, None),
        conditionally_independent=True,
    )
    d_d_country = Normal(
        name="delta_d_c",
        loc=0.0,
        scale=1.0,
        event_stack=(1, modelParams.num_countries, 1),
        shape_label=(None, "country", None),
        conditionally_independent=True,
    )

    # Collect every distribution under its canonical key for downstream use.
    return {
        "alpha_sigma_c": sigma_alpha_c,
        "alpha_sigma_a": sigma_alpha_a,
        "delta_alpha_cross_c": d_alpha_cross_country,
        "delta_alpha_cross_a": d_alpha_cross_age,
        "alpha_cross_i": alpha_cross_interv,
        "l_sigma_interv": sigma_l_interv,
        "l_positive_cross": l_pos_cross,
        "l_negative_cross": l_neg_cross,
        "delta_l_cross_i": d_l_cross_interv,
        "d_sigma_interv": sigma_d_interv,
        "d_sigma_country": sigma_d_country,
        "delta_d_i": d_d_interv,
        "delta_d_c": d_d_country,
    }
def construct_R_0(name, modelParams, loc, scale, hn_scale):
    r"""
    Constructs R_0 in the following hierarchical manner:

    .. math::

        R^*_{0,c} &= R^*_0 + \Delta R^*_{0,c}, \\
        R^*_0 &\sim \mathcal{N}\left(2,0.5\right)\\
        \Delta R^*_{0,c} &\sim \mathcal{N}\left(0, \sigma_{R^*, \text{country}}\right)\quad \forall c,\\
        \sigma_{R^*, \text{country}} &\sim HalfNormal\left(0.3\right)

    Parameters
    ----------
    name: str
        Name of the distribution (gets added to trace).
    modelParams: :py:class:`covid19_npis.ModelParams`
        Instance of modelParams, mainly used for number of age groups and
        number of countries.
    loc: number
        Location parameter of the R^*_0 Normal distribution.
    scale: number
        Scale parameter of the R^*_0 Normal distribution.
    hn_scale: number
        Scale parameter of the :math:`\sigma_{R^*, \text{country}}`
        HalfNormal distribution.

    Returns
    -------
    :
        R_0 tensor
        |shape| batch, country, age_group
    """
    # Non-centered parameterization: sample N(0, scale) and shift by loc.
    R_0 = (yield Normal(
        name="R_0",
        loc=0.0,
        scale=scale,
        conditionally_independent=True,
    )) + loc
    log.debug(f"R_0:\n{R_0}")
    # Country-spread hyperprior, non-centered: HalfNormal(1) * hn_scale.
    R_0_sigma_c = (yield HalfNormal(
        name="R_0_sigma_c",
        scale=1.0,
        conditionally_independent=True,
        transform=transformations.SoftPlus(),
    )) * hn_scale
    # Per-country offsets, scaled by the hyperprior.
    # Fix: event_stack was (modelParams.num_countries) — an int, not a
    # one-element tuple as used everywhere else in this file.
    delta_R_0_c = (yield Normal(
        name="delta_R_0_c",
        loc=0.0,
        scale=1.0,
        event_stack=(modelParams.num_countries,),
        shape_label=("country"),
        conditionally_independent=True,
    )) * R_0_sigma_c[..., tf.newaxis]
    log.debug(f"delta_R_0_c:\n{delta_R_0_c}")
    # Add to trace via deterministic
    R_0_c = R_0[..., tf.newaxis] + delta_R_0_c
    log.debug(f"R_0_c before softplus:\n{R_0_c}")
    # Softplus because we want to make sure that R_0 > 0.
    R_0_c = tf.math.softplus(R_0_c)
    R_0_c = yield Deterministic(
        name=name,
        value=R_0_c,
        shape_label=("country"),
    )
    log.debug(f"R_0_c:\n{R_0_c}")
    # For robustness, clip to a plausible range. Fix: tf.clip_by_value is
    # not in-place — the original discarded its return value, making the
    # clip a no-op; assign the result so the clip takes effect.
    R_0_c = tf.clip_by_value(R_0_c, 1, 5)
    # Broadcast the country-level R_0 to every age group.
    return tf.repeat(R_0_c[..., tf.newaxis],
                     repeats=modelParams.num_age_groups,
                     axis=-1)