def tomtom_refit(zipkmap):
    """Refit the current model type for a fixed K across sample sizes.

    Parameters
    ----------
    zipkmap : tuple
        ``zipkmap[0]`` is the K to refit with (stored into the global
        ``tm.K`` as a side effect); ``zipkmap[1]`` is a sequence where
        each element is a tensor of shape (nrep, nsample, datadim), one
        per entry of ``n_sample_array``.

    Returns
    -------
    tuple(list, list, list)
        Nested lists (one inner list per sample size, one entry per
        replication) of seeds, detached MAP estimates, and detached
        log-probabilities.

    Raises
    ------
    ValueError
        If ``tm.mtype`` contains neither 'gr' nor 'di'.
    """
    tm.K = zipkmap[0]
    kmap = zipkmap[1]
    print('Currently refitting {} model with K={}'.format(tm.mtype, tm.K),
          flush=True)
    # each element in the second layer is a tensor nrep*nsample*datadim
    stor1_seeds = []
    stor1_maps = []
    stor1_logprobs = []
    for tn, tens in enumerate(kmap):
        print('Where K = {} and sample size is {}'.format(
            tm.K, n_sample_array[tn]),
              flush=True)
        # iterate through the first dimension of the tensor, fitting a
        # model for each replication layer
        stor2_seeds = []
        stor2_maps = []
        stor2_logprobs = []
        for i in range(tens.shape[0]):
            if 'gr' in tm.mtype:
                # group models additionally return memberships (unused here)
                seed, mmap, mem, lp = tm.tomtom_svi(tens[i], print_fit=False)
            elif 'di' in tm.mtype:
                seed, mmap, lp = tm.tomtom_svi(tens[i], print_fit=False)
            else:
                # previously fell through silently, leaving seed/mmap/lp
                # unbound (NameError) or stale from a prior iteration
                raise ValueError(
                    'Unrecognized model type: {}'.format(tm.mtype))
            stor2_seeds.append(seed)
            # detach so stored results do not keep autograd graphs alive
            stor2_maps.append(detach_mmap(mmap))
            stor2_logprobs.append(lp.detach())
        stor1_seeds.append(stor2_seeds)
        stor1_maps.append(stor2_maps)
        stor1_logprobs.append(stor2_logprobs)
    return stor1_seeds, stor1_maps, stor1_logprobs
Example #2
0
def tomtom_refit(tens):
    """Refit the current model (with the already-set ``tm.K``) on ``tens``.

    Parameters
    ----------
    tens : tensor
        Tensor of shape (nrep, nsample, datadim); one model is fit per
        replication along the first axis.

    Returns
    -------
    tuple(list, list, list)
        Per-replication seeds, detached MAP estimates, and detached
        log-probabilities.

    Raises
    ------
    ValueError
        If ``tm.mtype`` contains neither 'gr' nor 'di'.
    """
    print('Where K = {} and sample size is {}'.format(tm.K, tens.shape[1]),flush=True)
    # iterate through the first dimension of the tensor, fitting a model
    # for each replication layer
    stor2_seeds = []
    stor2_maps = []
    stor2_logprobs = []
    for i in range(tens.shape[0]):
        if 'gr' in tm.mtype:
            # group models additionally return memberships (unused here)
            seed, mmap, mem, lp = tm.tomtom_svi(tens[i], print_fit=False)
        elif 'di' in tm.mtype:
            seed, mmap, lp = tm.tomtom_svi(tens[i], print_fit=False)
        else:
            # previously fell through silently, leaving seed/mmap/lp
            # unbound (NameError) or stale from a prior iteration
            raise ValueError('Unrecognized model type: {}'.format(tm.mtype))
        stor2_seeds.append(seed)
        # detach so stored results do not keep autograd graphs alive
        stor2_maps.append(detach_mmap(mmap))
        stor2_logprobs.append(lp.detach())
    return stor2_seeds, stor2_maps, stor2_logprobs
Example #3
0
def tomtom_refit(gendat, print_fit=False):
    """Refit models for K = 1..len(gendat) over generated datasets.

    Parameters
    ----------
    gendat : sequence
        Outer index corresponds to K (starting at 1); each element is a
        sequence of tensors of shape (nrep, nsample, datadim), one per
        sample size in ``n_sample_array``.
    print_fit : bool, optional
        Forwarded to ``tm.tomtom_svi`` to control fitting output.

    Returns
    -------
    tuple(list, list, list)
        Triply nested lists (K x sample-size x replication) of seeds,
        MAP estimates, and log-probabilities.

    Notes
    -----
    Mutates the global ``tm.K`` as a side effect, leaving it at
    ``len(gendat) + 1`` on return.
    """
    modrec_seeds = []
    modrec_maps = []
    modrec_logprobs = []
    tm.K = 1
    for kmap in gendat:
        print('Currently refitting {} model with K={}'.format(tm.mtype, tm.K))
        # each element in the second layer is a tensor nrep*nsample*datadim
        stor1_seeds = []
        stor1_maps = []
        stor1_logprobs = []
        for tn, tens in enumerate(kmap):
            print('Where sample size is {}'.format(n_sample_array[tn]))
            # fit one model per replication (first tensor axis)
            stor2_seeds = []
            stor2_maps = []
            stor2_logprobs = []
            for i in range(tens.shape[0]):
                seed, mmap, mem, lp = tm.tomtom_svi(tens[i], print_fit=print_fit)
                stor2_seeds.append(seed)
                stor2_maps.append(mmap)
                stor2_logprobs.append(lp)
            stor1_seeds.append(stor2_seeds)
            stor1_maps.append(stor2_maps)
            stor1_logprobs.append(stor2_logprobs)
        modrec_seeds.append(stor1_seeds)
        modrec_maps.append(stor1_maps)
        modrec_logprobs.append(stor1_logprobs)
        tm.K += 1
    return modrec_seeds, modrec_maps, modrec_logprobs
        ttarg_raw_all_3d, ttarg_raw_noauto_3d, tavg_norm_all_3d,
        tavg_norm_noauto_3d, tavg_raw_all_3d, tavg_raw_noauto_3d
    ] = pickle.load(f)

# set model fitting params on the shared tm module, then run one fit
tm.K = 3  # number of latent groups
tm.mtype = 'group'
tm.target = 'self'  # 'self','targ','avg'
tm.dtype = 'raw'  # 'norm','raw'
tm.auto = 'noauto'  # 'noauto','all'
tm.stickbreak = False  # whether the model uses stick-breaking weights
tm.optim = pyro.optim.Adam({'lr': 0.0005, 'betas': [0.8, 0.99]})
tm.elbo = TraceEnum_ELBO(max_plate_nesting=1)
# build the dataset variable name from the settings above and look it up
# among the previously unpickled tensors (here: 'tself_raw_noauto_3d')
dtname = 't{}_{}_{}_3d'.format(tm.target, tm.dtype, tm.auto)
data = globals()[dtname]
# NOTE(review): assumes the 'group' mtype with return_guide=True makes
# tomtom_svi return 5 values (seed, MAP, memberships, logprob, guide) —
# confirm against tm.tomtom_svi
seed_grp, mapl_grp, mem_grp, lp_grp, guide_grp = tm.tomtom_svi(
    data, return_guide=True)


# defining sparse-input model
@config_enumerate
def model_multi_obs_grp(obsmat):
    # some parameters can be directly derived from the data passed
    # K = 2
    nparticipants = data.shape[0]
    nfeatures = data.shape[1]  # number of rows in each person's matrix
    ncol = data.shape[2]

    # Background probability of different groups
    if tm.stickbreak:
        # stick breaking process for assigning weights to groups
        with pyro.plate("beta_plate", K - 1):
Example #5
0
# Refit the group model on each generated dataset for K = 1.., recording
# seeds, MAP estimates and log-probabilities in parallel nested lists.
# The seed/map accumulators were not initialized in this snippet, so the
# appends at the bottom of the loop raised NameError — initialize all three.
modrec_seeds_self_norm_all_grp = []
modrec_maps_self_norm_all_grp = []
modrec_logprobs_self_norm_all_grp = []
# each element in the outermost list is all gendat for a single k-MAP
tm.K = 1
for kmap in gendat_self_norm_all_grp:
    print(tm.K)
    # each element in the second layer is a tensor nrep*nsample*datadim
    stor1_seeds_self_norm_all_grp = []
    stor1_maps_self_norm_all_grp = []
    stor1_logprobs_self_norm_all_grp = []
    for tens in kmap:
        # fit one model per replication (first tensor axis)
        stor2_seeds_self_norm_all_grp = []
        stor2_maps_self_norm_all_grp = []
        stor2_logprobs_self_norm_all_grp = []
        for i in range(tens.shape[0]):
            # reset Pyro's global parameter store so fits are independent
            pyro.clear_param_store()
            seed, mmap, mem, lp = tm.tomtom_svi(tens[i])
            stor2_seeds_self_norm_all_grp.append(seed)
            stor2_maps_self_norm_all_grp.append(mmap)
            stor2_logprobs_self_norm_all_grp.append(lp)
        stor1_seeds_self_norm_all_grp.append(stor2_seeds_self_norm_all_grp)
        stor1_maps_self_norm_all_grp.append(stor2_maps_self_norm_all_grp)
        stor1_logprobs_self_norm_all_grp.append(stor2_logprobs_self_norm_all_grp)
    modrec_seeds_self_norm_all_grp.append(stor1_seeds_self_norm_all_grp)
    modrec_maps_self_norm_all_grp.append(stor1_maps_self_norm_all_grp)
    modrec_logprobs_self_norm_all_grp.append(stor1_logprobs_self_norm_all_grp)
    tm.K += 1

# Persist the refit results (maps first, then seeds, then log-probs) so
# downstream analysis can reload them without re-fitting.
with open('refit_mod_self_norm_all_grp','wb') as f:
    pickle.dump([modrec_maps_self_norm_all_grp,modrec_seeds_self_norm_all_grp,modrec_logprobs_self_norm_all_grp],f)