import logging

import numpy as np
from sklearn.datasets import fetch_olivetti_faces

from spn.structure.Base import Context
from spn.structure.StatisticalTypes import MetaType
from spn.structure.leaves.parametric.Parametric import Gaussian

logging.captureWarnings(True)

faces = fetch_olivetti_faces()
images = faces["images"]
print("faces loaded", images.shape)

# Split the images vertically into 16 blocks
img_size = (64, 64)
num_v_blocks = 16
num_blocks = (num_v_blocks, 1)

datasets = []
for i in range(num_v_blocks):
    # block i is the target, conditioned on the blocks above it (i-1, ..., 0)
    block_ids = np.arange(i, -1, -1)
    datasets.append((get_blocks(images, num_blocks=num_blocks, blocks=block_ids.tolist()), 1))

num_mpes = 1
num_samples = 10

cspns = []
mpe_query_blocks = None
sample_query_blocks = None
for i, ((tr_block, block_idx), conditional_blocks) in enumerate(datasets):
    print("learning", i)
    conditional_features_count = (tr_block.shape[1] // len(block_idx)) * conditional_blocks

    if i == 0:
        # spn
        ds_context = Context(meta_types=[MetaType.REAL] * tr_block.shape[1])
        ds_context.add_domains(tr_block)
        ds_context.parametric_types = [Gaussian] * tr_block.shape[1]
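
# --- Hypothetical sketch (assumption, not part of the original code). ---
# The real get_blocks helper used above is defined elsewhere in the repository;
# this sketch only illustrates the assumed contract: split each image into a
# num_blocks = (rows, cols) grid, concatenate the requested block ids as flat
# feature vectors (the first id is the target block, the remaining ids are the
# conditioning blocks), and return (data, per-block feature index lists).
def get_blocks_sketch(images, num_blocks=(2, 2), blocks=(0,)):
    n, h, w = images.shape
    rows, cols = num_blocks
    bh, bw = h // rows, w // cols
    data_parts, block_idx = [], []
    offset = 0
    for b in blocks:
        # map the block id to its (row, col) position in the grid
        r, c = divmod(b, cols)
        patch = images[:, r * bh:(r + 1) * bh, c * bw:(c + 1) * bw].reshape(n, -1)
        data_parts.append(patch)
        block_idx.append(list(range(offset, offset + patch.shape[1])))
        offset += patch.shape[1]
    return np.concatenate(data_parts, axis=1), block_idx
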
images = faces['images']
# images = images * 256
# images = add_poisson_noise(images)
# images = standardize(images)

# Learn cspns for image blocks like this:
# |0|1|
# |2|3|
# P0(0|labels)
# P1(1|0,labels)
# P2(2|1,0,labels)
# P3(3|2,1,0,labels)
datasets = [
    # block of 0
    get_blocks(images, num_blocks=(2, 2), blocks=[0]),
    # block of 1|0
    get_blocks(images, num_blocks=(2, 2), blocks=[1, 0]),
    # block of 2|1,0
    get_blocks(images, num_blocks=(2, 2), blocks=[2, 1, 0]),
    # block of 3|2,1,0
    get_blocks(images, num_blocks=(2, 2), blocks=[3, 2, 1, 0]),
]

cspns = []
mpe_query_blocks = None
sample_query_blocks = None
for i, (tr_block, block_idx) in enumerate(datasets):
    if i == 0:
        # spn
        ds_context = Context(meta_types=[MetaType.REAL] * tr_block.shape[1])
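        # --- Hypothetical continuation (the excerpt is truncated here; this is
        # an assumption, not the author's original code). Block 0 has nothing to
        # condition on, so a plain parametric SPN over its pixels can be learned
        # with SPFlow's learn_parametric once the context carries domains and
        # Gaussian leaf types. The min_instances_slice value is illustrative.
        from spn.algorithms.LearningWrappers import learn_parametric  # kept local so the sketch is self-contained

        ds_context.add_domains(tr_block)
        ds_context.parametric_types = [Gaussian] * tr_block.shape[1]
        spn = learn_parametric(tr_block, ds_context, min_instances_slice=100)
        cspns.append(spn)
    else:
        # Blocks 1..3 need a conditional model P(block_i | earlier blocks); the
        # conditional learner used by the original experiment is not shown in
        # this excerpt, so it is omitted from the sketch.
        pass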