def generate_data_check(self):
    """Pair each position photo with its FEN line and emit derived artifacts.

    Reads 'fen_data.txt' line by line, one line per .JPG in the positions
    directory, generates a rendered PNG and a grayscale copy for each photo,
    and dumps the file-name -> board-string mapping to 'file_dict.json'.

    Fix over original: the FEN file is now opened via a context manager so the
    handle is closed even if generation raises (the original leaked it).
    """
    MakePngFile.DIR_NAME = './Chess positions/'
    file_dict = dict()
    POSITIONS_DIR = MakePngFile.DIR_NAME + 'positions/'
    png_files = os.listdir(POSITIONS_DIR)
    # `with` guarantees the FEN file is closed on every exit path.
    with open(MakePngFile.DIR_NAME + 'fen_data.txt', 'r') as fen_file:
        for file_name in png_files:
            check_jpg_file = file_name.find('.JPG')
            if check_jpg_file < 0:
                # Skip anything that is not an upper-case .JPG snapshot.
                print("Not jpg:", file_name)
                continue
            board_string = fen_file.readline()
            if len(board_string) <= 0:
                # Ran out of FEN lines before running out of images.
                print("The end.")
                break
            # Only the first 64 characters encode the 8x8 board.
            board_string = board_string[0:64]
            file_dict[file_name] = board_string
            png_file_name = MakePngFile.DIR_NAME + 'Generated files/' + file_name
            self.generate_png_files(board_string, png_file_name)
            new_file_name = MakePngFile.DIR_NAME + 'Generated files/Grayscale/' + file_name
            scale_and_remove_color(POSITIONS_DIR + file_name, new_file_name)
    print(len(file_dict))
    json_file_name = MakePngFile.DIR_NAME + 'file_dict.json'
    fen.dump_dict_to_json_file(file_dict, json_file_name)
def create_json_file():
    """Build a snapshot-path -> board-string map and dump it as JSON.

    For every game listed in './Snapshots/board_strings.json', pairs the files
    in that game's directory (in os.listdir order) with the game's board
    strings and writes the combined mapping to './Snapshots/snapshots.json'.
    """
    import Fen_string_manipulations as fen
    base_dir = './Snapshots/'
    json_file = base_dir + 'board_strings.json'
    dump_file = base_dir + 'snapshots.json'
    pos_dict = fen.fetch_dict_from_json(json_file)
    correlation_dict = {}
    for game, positions in pos_dict.items():
        game_dir = base_dir + game + "/"
        # zip truncates to the shorter of (files, positions) — pairs in order.
        for snapshot, board_string in zip(os.listdir(game_dir), positions):
            correlation_dict[game_dir + snapshot] = board_string
    fen.dump_dict_to_json_file(correlation_dict, dump_file)
def generate_augmented_data(pickle_data, files_part, corner_data, fen_dict, num_of_copies, train):
    """Produce (x, y) training pairs of board sub-images from pickled boards.

    For each file in `files_part`: rotate its FEN string to match the stored
    corner orientation, optionally jitter the image (train mode), and split it
    into per-square sub-images via cbm.create_subimages.

    Fix over original: the progress message divided by len(corner_data) even
    though the loop iterates files_part, so the reported total was wrong.
    Returns a tuple (x_data, y_data) of accumulated lists.
    """
    x_data = []
    y_data = []
    counter = 0
    for file_name in files_part:
        np_array, coords, corner_pos = pickle_data[file_name]
        counter += 1
        if counter % 30 == 0:
            # Denominator is the set actually being iterated.
            print("Processed " + str(counter) + "/" + str(len(files_part)))
        image = Image.fromarray(np_array)
        fen_string = fen_dict[file_name]
        # Rotate the FEN to compensate for which corner is "first" in the data.
        fen_string = fen.rotate_board_multiple(fen_string, (corner_pos + 3) % 4)
        for _ in range(num_of_copies):
            if train:
                # NOTE(review): `image` is rebound here, so jitter accumulates
                # across copies and only the last image gets close()d —
                # presumably intentional augmentation; confirm before changing.
                image, coords = cbm.random_rotate_and_translate(image, coords)
            x_image_list, y_image_list = cbm.create_subimages(
                image, coords, fen_string, train)
            x_data += x_image_list
            y_data += y_image_list
        image.close()
    return (x_data, y_data)
def read_board_coord(self):
    """Overlay king markers on every image listed in coordinates.json.

    For each image path in the JSON dict, converts its raw coordinate array
    into centered spots plus a scale matrix, then draws kings onto the image.
    """
    json_file_name = './Chess positions/coordinates.json'
    file_dict = fen.fetch_dict_from_json(json_file_name)
    for image_path, raw_coords in file_dict.items():
        grid = cbm.get_coord_matrix(raw_coords)
        centers, scales = cbm.center_coordinates(grid)
        insert_kings_into_picture(image_path, centers, scales)
def fetch_data(self):
    """Load the table-corner snapshot dict and reset file iteration state.

    Populates self.snapshots_json, a sorted self.jpg_files list of its keys,
    and rewinds self.current_file_num to 0.
    """
    self.snapshots_json = fen.fetch_dict_from_json(
        './Snapshots/snapshots_table_corners.json')
    # Iterating the dict yields its keys; sorted() returns them as a new list.
    self.jpg_files = sorted(self.snapshots_json)
    self.current_file_num = 0
def fetch_corner_data(train=True):
    """Load corner annotations from the train or test JSON file.

    Args:
        train: when True, read Options.train_file and use
            Options.num_of_copies augmentations; otherwise read
            Options.test_file with a single copy.

    Returns:
        (data_list, num_of_copies) where data_list holds
        [key, entry[0], entry[1]] triples for every JSON entry.

    Idiom fix: the manual append loop (three dict lookups per key) is replaced
    by a single comprehension over .items().
    """
    if train:
        json_file_name = Options.train_file
        num_of_copies = Options.num_of_copies
    else:
        json_file_name = Options.test_file
        num_of_copies = 1
    data_dict = fen.fetch_dict_from_json(json_file_name)
    data_list = [[key, value[0], value[1]] for key, value in data_dict.items()]
    return data_list, num_of_copies
def fetch_data_list(train=True):
    """Load per-file annotation triples from the train or test JSON file.

    Args:
        train: when True, read Options.train_file and use
            Options.num_of_copies augmentations; otherwise read
            Options.test_file with a single copy.

    Returns:
        (data_list, num_of_copies) where data_list holds
        [file_name, entry[0], entry[1]] triples for every JSON entry.

    Idiom fix: the manual append loop performed three dict lookups per key;
    a comprehension over .items() does one.
    """
    if train:
        json_file_name = Options.train_file
        num_of_copies = Options.num_of_copies
    else:
        json_file_name = Options.test_file
        num_of_copies = 1
    data_dict = fen.fetch_dict_from_json(json_file_name)
    data_list = [[file_name, entry[0], entry[1]]
                 for file_name, entry in data_dict.items()]
    return data_list, num_of_copies
def run_piece_network():
    """Train and evaluate the piece-classification network end to end.

    Crops training and test sub-images from the annotated corner data, fits
    pmk.PiecePredictor on them, evaluates it, and prints the misclassified
    samples.
    """
    predictor = pmk.PiecePredictor()
    fen_dict = fen.fetch_dict_from_json(Options.board_string_file)
    # Build the train split, then the test split, from the same pipeline.
    train_corners, train_copies = fetch_corner_data(True)
    x_train, y_train = image_croping(train_corners, train_copies, fen_dict, True)
    test_corners, test_copies = fetch_corner_data(False)
    x_test, y_test = image_croping(test_corners, test_copies, fen_dict, False)
    predictor.load_data(x_train, y_train, x_test, y_test)
    predictor.train_model()
    predictor.evaluate_model()
    misclassified = predictor.show_predicted_data()
    print_error_data(misclassified)
def test_data():
    """Evaluate the corner network feeding the piece network on the test set.

    Pipeline: predict board corners with cmk.CornerPredictor, de-normalize
    them, report corner MSE against the annotated corners, then crop squares
    (currently from the GROUND-TRUTH corners, see comment below) and report
    piece-classification accuracy of pmk.PiecePredictor.
    """
    data_list, num = cn.fetch_data_list(False)
    x_test, y_test = cn.generate_data(data_list, num, False)
    predictor = cmk.CornerPredictor()
    # Same arrays passed as train and test: we only run inference here.
    predictor.load_data(x_test, y_test, x_test, y_test)
    y_pred = predictor.model.predict(predictor.x_test)
    # Undo the normalization applied during training.
    # NOTE(review): the 10/3 scale and AVG_CORNER_DATA[2] offset mirror the
    # training-time transform — confirm against cmk if that transform changes.
    corner_pred = (y_pred + cmk.AVG_CORNER_DATA[2]) * 10 / 3
    # corner_pred = (y_pred + cmk.AVG_CORNER_DATA[2]) * 5
    # Round to integer pixel coords, 4 corners x 2 coordinates per board.
    corner_pred = corner_pred.round().astype(int).reshape(-1, 4, 2).tolist()
    fen_dict = fen.fetch_dict_from_json(Options.board_string_file)
    corner_data, num = pn.fetch_corner_data(False)
    new_corner_data = []
    new_corners = []
    old_corners = []
    for i in range(len(corner_data)):
        # Annotated entry with its corners swapped for the predicted ones.
        new_corner_data.append(
            [corner_data[i][0], corner_pred[i], corner_data[i][2]])
        old_corners += [np.array(corner_data[i][1]).flatten()]
        new_corners += [np.array(corner_pred[i]).flatten()]
    # Mean squared error between annotated and predicted corner coordinates.
    print('MSE:',
          np.mean(np.square(np.array(old_corners) - np.array(new_corners))))
    # Cropping currently uses the annotated corners; the commented line would
    # use the predicted corners (new_corner_data) instead.
    # x_image, y_image = pn.image_croping(new_corner_data, 1, fen_dict, False)
    x_image, y_image = pn.image_croping(corner_data, 1, fen_dict, False)
    image_pred = pmk.PiecePredictor()
    image_pred.load_data(x_image, y_image, x_image, y_image)
    y_pred = image_pred.model.predict(image_pred.x_test)
    # Predicted class per square = argmax over class probabilities.
    y_ = np.argmax(y_pred, axis=1)
    print("Accuracy {:.2f}%".format(
        np.mean(np.equal(y_, y_image.flatten()).astype(int)) * 100))
def create_subimages(image, coords, fen_string, train):
    """Split a board image into 64 per-square images with piece labels.

    Walks the 8x8 grid row-major, extracting each square via ImageScaling and
    labeling it with the corresponding entry of the FEN-derived piece array.

    Returns:
        (x_data, y_data): list of per-square numpy arrays and list of labels.
    """
    piece_array = fen.convert_fen_to_array(fen_string)
    scaler = ImageScaling(image, coords)
    x_data = []
    y_data = []
    for row in range(8):
        for column in range(8):
            square_img = scaler.extract_image(row, column, train)
            # Row-major index of this square in the 64-entry piece array.
            square_index = column + row * 8
            x_data.append(np.array(square_img))
            y_data.append(piece_array[square_index])
    return x_data, y_data