import copy

import utils


def main(pole_strs):
    """Solve the grid with the single-candidate rule: fill a cell as soon as
    its row, column and 3x3 square together contain eight distinct digits."""
    new_strs = copy.deepcopy(pole_strs)
    new_cols = utils.adjust_cols_from_rows(new_strs)
    empties = {'x', 'X', '0', 0}  # markers used for empty cells

    def is_solved(rows):
        # Solved once no empty-cell marker remains anywhere in the grid.
        return not (empties & {elem for row in rows for elem in row})

    solved = is_solved(new_strs)
    while not solved:
        # Reset the flag every pass; otherwise a grid that stalls after the
        # first successful mark would loop forever.
        something_marked = False
        for x in range(9):
            for y in range(9):
                the_elem = new_strs[x][y]
                # Digits already placed in this cell's row, column and square.
                neighbors = set(new_strs[x] + new_cols[y])
                neighbors.update(utils.get_square_elements(x, y, new_strs))
                neighbors -= empties
                if len(neighbors) == 8 and not ('1' <= str(the_elem) <= '9'):
                    something_marked = True
                    # Exactly one digit is missing, so it must go here.
                    mark = ({str(i) for i in range(1, 10)} - neighbors).pop()
                    new_strs[x][y] = mark
                    print(f'marked a {mark}')
                    solved = is_solved(new_strs)
                    utils.write_to_txt(new_strs, 'thordo.txt')
                    new_cols = utils.adjust_cols_from_rows(new_strs)
        if not something_marked:
            print('unsolvable')
            break
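# A minimal, self-contained illustration of the single-candidate rule that
# main() applies. The neighbor digits below are hypothetical, not taken from
# a real grid: once a cell's row, column and square show eight distinct
# digits, exactly one candidate remains.
neighbors = {'1', '2', '3', '4', '5', '6', '7', '9'}
candidates = {str(i) for i in range(1, 10)} - neighbors
assert candidates == {'8'}  # the cell can only hold an 8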
import logging
import os

import numpy as np
import torch
import torch.nn.functional as F
from PIL import Image

import utils


def test_net(net, model_path, cfgs):
    logging.info("Start testing network...")
    # Test-time settings come from the [test] section of the config.
    device = cfgs.get("test", "device")
    display = cfgs.getint("test", "display")
    root = cfgs.get("test", "root")
    filelist = cfgs.get("test", "imagelist")
    mlog = utils.MetricLogger(delimiter=" ")
    net.eval()
    net = net.to(device)

    with torch.no_grad():
        res = []
        mlog.clear()
        imagelist = default_list_reader(filelist)
        transform = default_transform()
        for imgpath, label in mlog.log_every(imagelist, display, "test"):
            img = Image.open(os.path.join(root, imgpath))
            # Add a batch dimension before moving the tensor to the device.
            inputs = torch.as_tensor(transform(img)[np.newaxis, :])
            inputs = inputs.to(device)
            # forward
            outputs = net(inputs)
            outputs = F.softmax(outputs, dim=1)
            # One row per image: path, per-class probabilities, ground truth.
            res.append([imgpath] + outputs.cpu().numpy().tolist()[0] + [label])

    mlog.synchronize_between_processes()
    res_path = model_path.replace(".pt", "_%s" % filelist.split('/')[-1])
    logging.info(" * Writing results to %s" % res_path)
    utils.write_to_txt(res_path, res)
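# A hypothetical config that would satisfy the cfgs.get()/cfgs.getint() calls
# in test_net() above. The section and option names come from the function;
# the values here are placeholders, not the project's real settings.
import configparser

cfgs = configparser.ConfigParser()
cfgs['test'] = {
    'device': 'cuda:0',              # read back with cfgs.get('test', 'device')
    'display': '100',                # read back with cfgs.getint('test', 'display')
    'root': '/path/to/images',
    'imagelist': '/path/to/test_list.txt',
}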
import os
import random

import utils


def generate_txts(dataset_dir, split_size=1 / 8):
    """Split the train_val list into train and validation lists randomly.

    Images are grouped by patient so that no patient appears in both splits.

    Args:
        split_size: minimum fraction of the images reserved for the
            validation split.
    """
    with open(os.path.join(dataset_dir, 'train_val_list.txt')) as f:
        # splitlines() avoids a trailing empty entry from a final newline.
        image_names = f.read().splitlines()

    # Group image names by patient id (the first 8 characters of the name).
    images_by_patient = dict()
    for image_name in image_names:
        patient_id_str = image_name[:8]
        if patient_id_str not in images_by_patient:
            images_by_patient[patient_id_str] = []
        images_by_patient[patient_id_str].append(image_name)

    # Shuffle the patients, not the individual images.
    patients = list(images_by_patient)
    random.shuffle(patients)

    # Fill the validation split first, then send the rest to training.
    train_list = []
    val_list = []
    min_val_amount = len(image_names) * split_size
    for patient in patients:
        patient_images = images_by_patient[patient]
        if len(val_list) < min_val_amount:
            val_list += patient_images
        else:
            train_list += patient_images
    train_list.sort()
    val_list.sort()

    # Write the two lists next to the original one.
    utils.write_to_txt(train_list, os.path.join(dataset_dir, 'train_list.txt'))
    utils.write_to_txt(val_list, os.path.join(dataset_dir, 'val_list.txt'))

    train_samples = len(train_list)
    val_samples = len(val_list)
    total_samples = train_samples + val_samples
    print('Training and validation lists generated')
    print('\tTotal images: {}'.format(total_samples))
    print('\tTrain samples: {} ({:.2f} %)'.format(
        train_samples, train_samples / total_samples * 100))
    print('\tValidation samples: {} ({:.2f} %)'.format(
        val_samples, val_samples / total_samples * 100))
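# Hypothetical usage of generate_txts(), assuming dataset_dir contains a
# train_val_list.txt with one image name per line whose first 8 characters
# encode the patient id (e.g. '00000001_000.png', as in NIH ChestX-ray14).
# The directory path below is a placeholder.
generate_txts('/path/to/chestxray_dataset', split_size=1 / 8)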
def main(difficulty):
    sudoku_gen.main()
    field = utils.read_field()
    new_field = set_difficulty(field, difficulty)
    utils.write_to_txt(new_field, 'new_field.txt')
def main():
    field_rows = generate_field()
    utils.write_to_txt(field_rows, 'sudokunotsee.txt')
    see = set_difficulty(field_rows, 1)
    utils.write_to_txt(see, 'sudokusee.txt')
def main():
    field_rows = generate_field()
    utils.write_to_txt(field_rows, 'field.txt')