def original_raven(modifiable_attr, answer_AoT, rule_groups, context):
    """Create the negative choices for the original RAVEN dataset."""
    candidates = [answer_AoT]
    answers_imgs = [render_panel(answer_AoT)]
    answer_score, _ = solve(rule_groups, context, [answer_AoT])
    assert answer_score > 0
    while len(candidates) < 8:
        # Perturb a single attribute of the correct answer to create a distractor.
        component_idx, attr_name, min_level, max_level = sample_attr(modifiable_attr)
        new_answer = copy.deepcopy(answer_AoT)
        new_answer.sample_new(component_idx, attr_name, min_level, max_level, answer_AoT)
        new_answer_img = render_panel(new_answer)
        ok = True
        # Reject candidates that solve the puzzle at least as well as the true answer.
        new_answer_score, _ = solve(rule_groups, context, [new_answer])
        if new_answer_score >= answer_score:
            print('Warning - Accidentally generated good answer - resampling')
            ok = False
        # Reject candidates whose rendering duplicates an existing choice.
        for i in range(len(answers_imgs)):
            if (new_answer_img == answers_imgs[i]).all():
                print('Warning - New answer equals existing image - resampling')
                ok = False
        if ok:
            candidates.append(new_answer)
            answers_imgs.append(new_answer_img)
    return candidates, answers_imgs
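# Note: the three per-row construction loops inside fuse() and separate() below
# are identical up to variable names. A hypothetical helper along these lines
# could factor them out; it is only a sketch and is not called by the code
# below, which keeps the inline version.
def build_row(start_node, rule_groups, resample=False):
    """Sketch: derive the three panels of one row from a start node."""
    col_1 = copy.deepcopy(start_node)
    if resample:
        col_1.resample(True)
    for l in range(len(rule_groups)):
        rule_group = rule_groups[l]
        rule_num_pos = rule_group[0]
        col_2 = rule_num_pos.apply_rule(col_1)
        col_3 = rule_num_pos.apply_rule(col_2)
        for i in range(1, len(rule_group)):
            col_2 = rule_group[i].apply_rule(col_1, col_2)
        for i in range(1, len(rule_group)):
            col_3 = rule_group[i].apply_rule(col_2, col_3)
        if l == 0:
            to_merge = [col_1, col_2, col_3]
        else:
            merge_component(to_merge[1], col_2, l)
            merge_component(to_merge[2], col_3, l)
    return to_merge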
def fuse(args, all_configs):
    random.seed(args.seed)
    np.random.seed(args.seed)
    acc = 0
    for k in trange(args.num_samples * len(all_configs)):
        # Split samples into train/val/test by index (args.val/args.test as fractions).
        if k < args.num_samples * (1 - args.val - args.test):
            set_name = "train"
        elif k < args.num_samples * (1 - args.test):
            set_name = "val"
        else:
            set_name = "test"

        # Pick a random configuration and keep sampling rules until pruning succeeds.
        tree_name = random.choice(list(all_configs.keys()))
        root = all_configs[tree_name]
        while True:
            rule_groups = sample_rules()
            new_root = root.prune(rule_groups)
            if new_root is not None:
                break
        start_node = new_root.sample()

        # First row: apply the Number/Position rule, then the remaining rules per component.
        row_1_1 = copy.deepcopy(start_node)
        for l in range(len(rule_groups)):
            rule_group = rule_groups[l]
            rule_num_pos = rule_group[0]
            row_1_2 = rule_num_pos.apply_rule(row_1_1)
            row_1_3 = rule_num_pos.apply_rule(row_1_2)
            for i in range(1, len(rule_group)):
                rule = rule_group[i]
                row_1_2 = rule.apply_rule(row_1_1, row_1_2)
            for i in range(1, len(rule_group)):
                rule = rule_group[i]
                row_1_3 = rule.apply_rule(row_1_2, row_1_3)
            if l == 0:
                to_merge = [row_1_1, row_1_2, row_1_3]
            else:
                merge_component(to_merge[1], row_1_2, l)
                merge_component(to_merge[2], row_1_3, l)
        row_1_1, row_1_2, row_1_3 = to_merge

        # Second row: resample the start node, then apply the same rule groups.
        row_2_1 = copy.deepcopy(start_node)
        row_2_1.resample(True)
        for l in range(len(rule_groups)):
            rule_group = rule_groups[l]
            rule_num_pos = rule_group[0]
            row_2_2 = rule_num_pos.apply_rule(row_2_1)
            row_2_3 = rule_num_pos.apply_rule(row_2_2)
            for i in range(1, len(rule_group)):
                rule = rule_group[i]
                row_2_2 = rule.apply_rule(row_2_1, row_2_2)
            for i in range(1, len(rule_group)):
                rule = rule_group[i]
                row_2_3 = rule.apply_rule(row_2_2, row_2_3)
            if l == 0:
                to_merge = [row_2_1, row_2_2, row_2_3]
            else:
                merge_component(to_merge[1], row_2_2, l)
                merge_component(to_merge[2], row_2_3, l)
        row_2_1, row_2_2, row_2_3 = to_merge

        # Third row: resample again; the last panel (row_3_3) is the correct answer.
        row_3_1 = copy.deepcopy(start_node)
        row_3_1.resample(True)
        for l in range(len(rule_groups)):
            rule_group = rule_groups[l]
            rule_num_pos = rule_group[0]
            row_3_2 = rule_num_pos.apply_rule(row_3_1)
            row_3_3 = rule_num_pos.apply_rule(row_3_2)
            for i in range(1, len(rule_group)):
                rule = rule_group[i]
                row_3_2 = rule.apply_rule(row_3_1, row_3_2)
            for i in range(1, len(rule_group)):
                rule = rule_group[i]
                row_3_3 = rule.apply_rule(row_3_2, row_3_3)
            if l == 0:
                to_merge = [row_3_1, row_3_2, row_3_3]
            else:
                merge_component(to_merge[1], row_3_2, l)
                merge_component(to_merge[2], row_3_3, l)
        row_3_1, row_3_2, row_3_3 = to_merge

        # Render the eight context panels plus an empty placeholder for the answer slot.
        imgs = [render_panel(row_1_1),
                render_panel(row_1_2),
                render_panel(row_1_3),
                render_panel(row_2_1),
                render_panel(row_2_2),
                render_panel(row_2_3),
                render_panel(row_3_1),
                render_panel(row_3_2),
                np.zeros((IMAGE_SIZE, IMAGE_SIZE), np.uint8)]
        context = [row_1_1, row_1_2, row_1_3,
                   row_2_1, row_2_2, row_2_3,
                   row_3_1, row_3_2]

        # Build seven distractors by perturbing one attribute of the answer each time.
        modifiable_attr = sample_attr_avail(rule_groups, row_3_3)
        answer_AoT = copy.deepcopy(row_3_3)
        candidates = [answer_AoT]
        for j in range(7):
            component_idx, attr_name, min_level, max_level = sample_attr(modifiable_attr)
            answer_j = copy.deepcopy(answer_AoT)
            answer_j.sample_new(component_idx, attr_name, min_level, max_level, answer_AoT)
            candidates.append(answer_j)
        random.shuffle(candidates)

        answers = []
        for candidate in candidates:
            answers.append(render_panel(candidate))
        # imsave(generate_matrix_answer(imgs + answers), "./experiments/fuse/{}.jpg".format(k))

        image = imgs[0:8] + answers
        target = candidates.index(answer_AoT)
        predicted = solve(rule_groups, context, candidates)
        meta_matrix, meta_target = serialize_rules(rule_groups)
        structure, meta_structure = serialize_aot(start_node)
        np.savez("{}/RAVEN_{}_{}.npz".format(args.save_dir, k, set_name),
                 image=image,
                 target=target,
                 predict=predicted,
                 meta_matrix=meta_matrix,
                 meta_target=meta_target,
                 structure=structure,
                 meta_structure=meta_structure)
        with open("{}/RAVEN_{}_{}.xml".format(args.save_dir, k, set_name), "w") as f:
            dom = dom_problem(context + candidates, rule_groups)
            f.write(dom)

        if target == predicted:
            acc += 1
    print("Accuracy: {}".format(float(acc) / (args.num_samples * len(all_configs))))
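# Sketch (not part of the original code): reading back one of the saved .npz
# samples. The field names match the np.savez call in fuse()/separate() above;
# the path argument is a placeholder.
def load_sample(path):
    data = np.load(path)
    image = data['image']         # 16 panels: 8 context panels + 8 answer choices
    target = int(data['target'])  # index of the correct choice among the 8 answers
    return image, target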
def separate(args, all_configs):
    """Generate one dataset folder per configuration; the eight answer choices
    come from original_raven() or fair_raven() depending on args.fair."""
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    for key in all_configs.keys():
        if not os.path.exists(os.path.join(args.save_dir, key)):
            os.mkdir(os.path.join(args.save_dir, key))
    random.seed(args.seed)
    np.random.seed(args.seed)
    for key in all_configs.keys():
        acc = 0
        for k in tqdm(range(args.num_samples), key):
            # Split samples into train/val/test by index modulo 10
            # (args.val and args.test are counts out of every 10 samples).
            count_num = k % 10
            if count_num < (10 - args.val - args.test):
                set_name = "train"
            elif count_num < (10 - args.test):
                set_name = "val"
            else:
                set_name = "test"

            # Keep sampling rule groups until pruning the configuration succeeds.
            root = all_configs[key]
            while True:
                rule_groups = sample_rules()
                new_root = root.prune(rule_groups)
                if new_root is not None:
                    break
            start_node = new_root.sample()

            # First row: apply the Number/Position rule, then the remaining rules per component.
            row_1_1 = copy.deepcopy(start_node)
            for l in range(len(rule_groups)):
                rule_group = rule_groups[l]
                rule_num_pos = rule_group[0]
                row_1_2 = rule_num_pos.apply_rule(row_1_1)
                row_1_3 = rule_num_pos.apply_rule(row_1_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_1_2 = rule.apply_rule(row_1_1, row_1_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_1_3 = rule.apply_rule(row_1_2, row_1_3)
                if l == 0:
                    to_merge = [row_1_1, row_1_2, row_1_3]
                else:
                    merge_component(to_merge[1], row_1_2, l)
                    merge_component(to_merge[2], row_1_3, l)
            row_1_1, row_1_2, row_1_3 = to_merge

            # Second row: resample the start node, then apply the same rule groups.
            row_2_1 = copy.deepcopy(start_node)
            row_2_1.resample(True)
            for l in range(len(rule_groups)):
                rule_group = rule_groups[l]
                rule_num_pos = rule_group[0]
                row_2_2 = rule_num_pos.apply_rule(row_2_1)
                row_2_3 = rule_num_pos.apply_rule(row_2_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_2_2 = rule.apply_rule(row_2_1, row_2_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_2_3 = rule.apply_rule(row_2_2, row_2_3)
                if l == 0:
                    to_merge = [row_2_1, row_2_2, row_2_3]
                else:
                    merge_component(to_merge[1], row_2_2, l)
                    merge_component(to_merge[2], row_2_3, l)
            row_2_1, row_2_2, row_2_3 = to_merge

            # Third row: resample again; the last panel (row_3_3) is the correct answer.
            row_3_1 = copy.deepcopy(start_node)
            row_3_1.resample(True)
            for l in range(len(rule_groups)):
                rule_group = rule_groups[l]
                rule_num_pos = rule_group[0]
                row_3_2 = rule_num_pos.apply_rule(row_3_1)
                row_3_3 = rule_num_pos.apply_rule(row_3_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_3_2 = rule.apply_rule(row_3_1, row_3_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_3_3 = rule.apply_rule(row_3_2, row_3_3)
                if l == 0:
                    to_merge = [row_3_1, row_3_2, row_3_3]
                else:
                    merge_component(to_merge[1], row_3_2, l)
                    merge_component(to_merge[2], row_3_3, l)
            row_3_1, row_3_2, row_3_3 = to_merge

            # Render the eight context panels plus an empty placeholder for the answer slot.
            imgs = [render_panel(row_1_1),
                    render_panel(row_1_2),
                    render_panel(row_1_3),
                    render_panel(row_2_1),
                    render_panel(row_2_2),
                    render_panel(row_2_3),
                    render_panel(row_3_1),
                    render_panel(row_3_2),
                    np.zeros((IMAGE_SIZE, IMAGE_SIZE), np.uint8)]
            context = [row_1_1, row_1_2, row_1_3,
                       row_2_1, row_2_2, row_2_3,
                       row_3_1, row_3_2]

            answer_AoT = copy.deepcopy(row_3_3)
            modifiable_attr = sample_attr_avail(rule_groups, answer_AoT)
            # Original RAVEN distractors or balanced RAVEN-FAIR distractors.
            if not args.fair:
                candidates, answers_imgs = original_raven(modifiable_attr, answer_AoT,
                                                          rule_groups, context)
            else:
                candidates, answers_imgs = fair_raven(modifiable_attr, answer_AoT,
                                                      rule_groups, context)

            if args.save:
                # Shuffle the candidates together with their rendered images.
                zipped = list(zip(candidates, answers_imgs))
                random.shuffle(zipped)
                candidates, answers_imgs = zip(*zipped)
                image = imgs[0:8] + list(answers_imgs)
                target = candidates.index(answer_AoT)
                _, predicted = solve(rule_groups, context, candidates)
                meta_matrix, meta_target = serialize_rules(rule_groups)
                structure, meta_structure = serialize_aot(start_node)
                np.savez_compressed("{}/{}/RAVEN_{}_{}.npz".format(args.save_dir, key, k, set_name),
                                    image=image,
                                    target=target,
                                    predict=predicted,
                                    meta_matrix=meta_matrix,
                                    meta_target=meta_target,
                                    structure=structure,
                                    meta_structure=meta_structure)
                with open("{}/{}/RAVEN_{}_{}.xml".format(args.save_dir, key, k, set_name), "w") as f:
                    dom = dom_problem(context + list(candidates), rule_groups)
                    f.write(dom)
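# Sketch (not part of the original code): list the .npz files that the
# separate() function above writes for one configuration directory. It only
# assumes the "{save_dir}/{key}/RAVEN_{k}_{split}.npz" layout used above.
import glob

def list_generated(save_dir, key):
    pattern = os.path.join(save_dir, key, 'RAVEN_*_*.npz')
    return sorted(glob.glob(pattern))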
def fair_raven(modifiable_attr, answer_AoT, rule_groups, context):
    """Create the negative choices for the balanced RAVEN-FAIR dataset."""
    candidates = [answer_AoT]
    answers_imgs = [render_panel(answer_AoT)]
    answer_score, _ = solve(rule_groups, context, [answer_AoT])
    assert answer_score > 0
    # Each candidate keeps its own list of still-modifiable attributes and a
    # blacklist of attribute names already perturbed along its branch.
    attrs = [modifiable_attr]
    idxs = []
    blacklist = [[]]
    try:
        while len(candidates) < 8:
            # Pick an existing candidate that still has modifiable attributes;
            # timeout_flag marks that such a candidate was found.
            while True:
                indices = random.sample(range(len(candidates)), k=len(candidates))
                timeout_flag = False
                for idx in indices:
                    if len(attrs[idx]) > 0:
                        timeout_flag = True
                        break
                if timeout_flag:
                    break
                print('No option to continue')
                raise Exception('No option to continue')
            attr_i = attrs[idx]
            candidate_i = candidates[idx]
            blacklist_i = blacklist[idx]
            component_idx, attr_name, min_level, max_level = sample_attr(attr_i)
            try:
                with timeout(5):
                    new_answer = copy.deepcopy(candidate_i)
                    new_answer.sample_new(component_idx, attr_name, min_level, max_level, candidate_i)
                    new_attr = sample_attr_avail(rule_groups, new_answer)
            except Exception as e:
                print('Attempt to sample failed - recovering')
                print(e)
                print(idxs)
                print(component_idx, attr_name, min_level, max_level)
                for attr in attr_i:
                    print(attr)
                print(blacklist_i)
                continue
            # Extend the blacklist and drop blacklisted attributes from the new candidate.
            new_blacklist = copy.deepcopy(blacklist_i) + [attr_name]
            for i in reversed(range(len(new_attr))):
                if new_attr[i][1] in new_blacklist:
                    new_attr.pop(i)
            new_answer_img = render_panel(new_answer)
            ok = True
            # Reject candidates that solve the puzzle at least as well as the true answer.
            new_answer_score, _ = solve(rule_groups, context, [new_answer])
            if new_answer_score >= answer_score:
                print('Warning - Accidentally generated good answer - resampling')
                ok = False
            # Reject candidates whose rendering duplicates an existing choice.
            for i in range(len(answers_imgs)):
                if (new_answer_img == answers_imgs[i]).all():
                    print('Warning - New answer equals existing image - resampling')
                    ok = False
            if ok:
                idxs.append(idx)
                candidates.append(new_answer)
                attrs.append(new_attr)
                blacklist.append(new_blacklist)
                answers_imgs.append(new_answer_img)
    except Exception as e:
        print(e)
        raise e
    return candidates, answers_imgs
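# fair_raven() above relies on a `timeout` context manager that is defined
# elsewhere in the repository. A minimal signal-based sketch of such a manager
# is shown here for reference only (assumptions: Unix, main thread); the actual
# implementation may differ, and this sketch is not used by the code above.
import signal
from contextlib import contextmanager

@contextmanager
def _timeout_sketch(seconds):
    def _handler(signum, frame):
        raise Exception('Timed out after {} seconds'.format(seconds))
    old_handler = signal.signal(signal.SIGALRM, _handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        # Cancel the pending alarm and restore the previous handler.
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_handler)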
def separate(args, all_configs):
    """Alternative per-configuration generator that builds the eight answer
    choices by independently varying up to three attributes of the correct answer."""
    random.seed(args.seed)
    np.random.seed(args.seed)
    for key in all_configs.keys():
        acc = 0
        for k in trange(args.num_samples):
            # Split samples into train/val/test by index modulo 10
            # (args.val and args.test are counts out of every 10 samples).
            count_num = k % 10
            if count_num < (10 - args.val - args.test):
                set_name = "train"
            elif count_num < (10 - args.test):
                set_name = "val"
            else:
                set_name = "test"

            # Keep sampling rule groups until pruning the configuration succeeds.
            root = all_configs[key]
            while True:
                rule_groups = sample_rules()
                new_root = root.prune(rule_groups)
                if new_root is not None:
                    break
            start_node = new_root.sample()

            # First row: apply the Number/Position rule, then the remaining rules per component.
            row_1_1 = copy.deepcopy(start_node)
            for l in range(len(rule_groups)):
                rule_group = rule_groups[l]
                rule_num_pos = rule_group[0]
                row_1_2 = rule_num_pos.apply_rule(row_1_1)
                row_1_3 = rule_num_pos.apply_rule(row_1_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_1_2 = rule.apply_rule(row_1_1, row_1_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_1_3 = rule.apply_rule(row_1_2, row_1_3)
                if l == 0:
                    to_merge = [row_1_1, row_1_2, row_1_3]
                else:
                    merge_component(to_merge[1], row_1_2, l)
                    merge_component(to_merge[2], row_1_3, l)
            row_1_1, row_1_2, row_1_3 = to_merge

            # Second row: resample the start node, then apply the same rule groups.
            row_2_1 = copy.deepcopy(start_node)
            row_2_1.resample(True)
            for l in range(len(rule_groups)):
                rule_group = rule_groups[l]
                rule_num_pos = rule_group[0]
                row_2_2 = rule_num_pos.apply_rule(row_2_1)
                row_2_3 = rule_num_pos.apply_rule(row_2_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_2_2 = rule.apply_rule(row_2_1, row_2_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_2_3 = rule.apply_rule(row_2_2, row_2_3)
                if l == 0:
                    to_merge = [row_2_1, row_2_2, row_2_3]
                else:
                    merge_component(to_merge[1], row_2_2, l)
                    merge_component(to_merge[2], row_2_3, l)
            row_2_1, row_2_2, row_2_3 = to_merge

            # Third row: resample again; the last panel (row_3_3) is the correct answer.
            row_3_1 = copy.deepcopy(start_node)
            row_3_1.resample(True)
            for l in range(len(rule_groups)):
                rule_group = rule_groups[l]
                rule_num_pos = rule_group[0]
                row_3_2 = rule_num_pos.apply_rule(row_3_1)
                row_3_3 = rule_num_pos.apply_rule(row_3_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_3_2 = rule.apply_rule(row_3_1, row_3_2)
                for i in range(1, len(rule_group)):
                    rule = rule_group[i]
                    row_3_3 = rule.apply_rule(row_3_2, row_3_3)
                if l == 0:
                    to_merge = [row_3_1, row_3_2, row_3_3]
                else:
                    merge_component(to_merge[1], row_3_2, l)
                    merge_component(to_merge[2], row_3_3, l)
            row_3_1, row_3_2, row_3_3 = to_merge

            # Render the eight context panels plus an empty placeholder for the answer slot.
            imgs = [render_panel(row_1_1),
                    render_panel(row_1_2),
                    render_panel(row_1_3),
                    render_panel(row_2_1),
                    render_panel(row_2_2),
                    render_panel(row_2_3),
                    render_panel(row_3_1),
                    render_panel(row_3_2),
                    np.zeros((IMAGE_SIZE, IMAGE_SIZE), np.uint8)]
            context = [row_1_1, row_1_2, row_1_3,
                       row_2_1, row_2_2, row_2_3,
                       row_3_1, row_3_2]

            modifiable_attr = sample_attr_avail(rule_groups, row_3_3)
            answer_AoT = copy.deepcopy(row_3_3)
            candidates = [answer_AoT]

            # Select up to three modifiable attributes for building the distractors.
            attr_num = 3
            if attr_num <= len(modifiable_attr):
                idx = np.random.choice(len(modifiable_attr), attr_num, replace=False)
                selected_attr = [modifiable_attr[i] for i in idx]
            else:
                selected_attr = modifiable_attr
            mode = None
            # Move the 'Number' attribute to the end for convenience.
            pos = [i for i in range(len(selected_attr)) if selected_attr[i][1] == 'Number']
            if pos:
                pos = pos[0]
                selected_attr[pos], selected_attr[-1] = selected_attr[-1], selected_attr[pos]
            pos = [i for i in range(len(selected_attr)) if selected_attr[i][1] == 'Position']
            if pos:
                mode = 'Position-Number'

            values = []
            if len(selected_attr) >= 3:
                # Three attributes: double the candidate set once per attribute (2^3 = 8 choices).
                mode_3 = None
                if mode == 'Position-Number':
                    mode_3 = '3-Position-Number'
                for i in range(attr_num):
                    component_idx, attr_name, min_level, max_level, attr_uni = \
                        selected_attr[i][0], selected_attr[i][1], selected_attr[i][3], \
                        selected_attr[i][4], selected_attr[i][5]
                    value = answer_AoT.sample_new_value(component_idx, attr_name, min_level,
                                                        max_level, attr_uni, mode_3)
                    values.append(value)
                    tmp = []
                    for j in candidates:
                        new_AoT = copy.deepcopy(j)
                        new_AoT.apply_new_value(component_idx, attr_name, value)
                        tmp.append(new_AoT)
                    candidates += tmp
            elif len(selected_attr) == 2:
                # Two attributes: perturb the first once, then apply new values of the
                # second across existing candidates (1 + 1 + 6 = 8 choices in total).
                component_idx, attr_name, min_level, max_level, attr_uni = \
                    selected_attr[0][0], selected_attr[0][1], selected_attr[0][3], \
                    selected_attr[0][4], selected_attr[0][5]
                value = answer_AoT.sample_new_value(component_idx, attr_name, min_level,
                                                    max_level, attr_uni, None)
                values.append(value)
                new_AoT = copy.deepcopy(answer_AoT)
                new_AoT.apply_new_value(component_idx, attr_name, value)
                candidates.append(new_AoT)
                component_idx, attr_name, min_level, max_level, attr_uni = \
                    selected_attr[1][0], selected_attr[1][1], selected_attr[1][3], \
                    selected_attr[1][4], selected_attr[1][5]
                if mode == 'Position-Number':
                    ran, qu = 6, 1
                else:
                    ran, qu = 3, 2
                for i in range(ran):
                    value = answer_AoT.sample_new_value(component_idx, attr_name, min_level,
                                                        max_level, attr_uni, None)
                    values.append(value)
                    for j in range(qu):
                        new_AoT = copy.deepcopy(candidates[j])
                        new_AoT.apply_new_value(component_idx, attr_name, value)
                        candidates.append(new_AoT)
            elif len(selected_attr) == 1:
                # One attribute: sample seven new values for it (8 choices in total).
                component_idx, attr_name, min_level, max_level, attr_uni = \
                    selected_attr[0][0], selected_attr[0][1], selected_attr[0][3], \
                    selected_attr[0][4], selected_attr[0][5]
                for i in range(7):
                    value = answer_AoT.sample_new_value(component_idx, attr_name, min_level,
                                                        max_level, attr_uni, None)
                    values.append(value)
                    new_AoT = copy.deepcopy(answer_AoT)
                    new_AoT.apply_new_value(component_idx, attr_name, value)
                    candidates.append(new_AoT)

            random.shuffle(candidates)
            answers = []
            for candidate in candidates:
                answers.append(render_panel(candidate))
            # imsave(generate_matrix_answer(imgs + answers), "/media/dsg3/hs/RAVEN_image/experiments2/{}/{}.jpg".format(key, k))

            image = imgs[0:8] + answers
            target = candidates.index(answer_AoT)
            predicted = solve(rule_groups, context, candidates)
            meta_matrix, meta_target = serialize_rules(rule_groups)
            structure, meta_structure = serialize_aot(start_node)
            np.savez("{}/{}/RAVEN_{}_{}.npz".format(args.save_dir, key, k, set_name),
                     image=image,
                     target=target,
                     predict=predicted,
                     meta_matrix=meta_matrix,
                     meta_target=meta_target,
                     structure=structure,
                     meta_structure=meta_structure)
            with open("{}/{}/RAVEN_{}_{}.xml".format(args.save_dir, key, k, set_name), "wb") as f:
                dom = dom_problem(context + candidates, rule_groups)
                f.write(dom)
            if target == predicted:
                acc += 1
        print("Accuracy of {}: {}".format(key, float(acc) / args.num_samples))
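# Minimal driver sketch (not part of the original file). It only illustrates
# the argparse fields that fuse()/separate() read: num_samples, val, test,
# seed, save_dir, fair and save. Flag names and defaults here are assumptions;
# note that fuse() interprets val/test as fractions while separate() treats
# them as counts out of every 10 samples.
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(description='RAVEN dataset generation (sketch)')
    parser.add_argument('--num_samples', type=int, default=10000)
    parser.add_argument('--val', type=float, default=2)
    parser.add_argument('--test', type=float, default=2)
    parser.add_argument('--seed', type=int, default=1234)
    parser.add_argument('--save_dir', type=str, default='./Datasets')
    parser.add_argument('--fair', action='store_true')
    parser.add_argument('--save', action='store_true')
    args = parser.parse_args()
    # all_configs (configuration name -> AoT root) is built elsewhere in the
    # repository; with it in scope one would call, for example:
    # separate(args, all_configs)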