Code example #1
import lzma

import numpy as np
import torch
import torchvision.transforms.functional as TF
from bitstring import BitArray

# Autoencoder and load_checkpoint are defined elsewhere in the project.


class Decoder:
    def __init__(self, path):
        self.model = Autoencoder().float()
        checkpoint = load_checkpoint(path)
        self.model.load_state_dict(checkpoint['model_state'])
        self.model.eval()

    def decompress(self, in_path, out_path):
        with lzma.open(in_path, 'rb') as fp:
            # Header: crop offsets (1 byte each) and latent spatial dimensions (2 bytes each).
            dw = int.from_bytes(fp.read(1), byteorder='big', signed=False)
            dh = int.from_bytes(fp.read(1), byteorder='big', signed=False)
            S2 = int.from_bytes(fp.read(2), byteorder='big', signed=False)
            S3 = int.from_bytes(fp.read(2), byteorder='big', signed=False)

            # Read the binarized latent code back one byte (8 bits) at a time.
            y = np.empty((1, 128, S2, S3)).ravel()
            j = 0

            print('reading matrix')
            byte = fp.read(1)
            while byte != b"":
                for bit in BitArray(byte).bin:
                    y[j] = int(bit)
                    j += 1
                byte = fp.read(1)
        

        # Map the stored {0, 1} bits back to the {-1, 1} values used by the decoder.
        y[y < 0.5] = -1
        y = torch.from_numpy(y.reshape(1, 128, S2, S3)).float()

        output = self.model.dec(y)
        img = TF.to_pil_image(output.squeeze(0))

        # Crop away the padding that was added before encoding.
        width, height = img.size
        img = img.crop((dw, dh, width, height))
        img.save(out_path, "PNG")
        return y
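A minimal usage sketch for the decoder above, assuming the class is importable and a compressed file produced by the matching encoder already exists (the file names below are hypothetical):

# Hypothetical paths; decompress() writes the reconstructed PNG and also
# returns the binarized latent tensor that was fed to the decoder network.
dec = Decoder("checkpoints/autoencoder_best.pth")
latent = dec.decompress("kodim01.xz", "kodim01_reconstructed.png")
print(latent.shape)  # torch.Size([1, 128, S2, S3]), with S2 and S3 read from the header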
Code example #2
def find_similar(fen, num=1, similarity_function=nearest):
    """
    Finds x smallest values ​​in a table.
    :param fen: String with chess position in Fen notation
    :param num: Number of games to be found
    :param similarity_function: Function that measures the similarity of vectors
    :return: list of games with similar positions
    """
    coder = Autoencoder(settings.BOARD_SHAPE, settings.LATENT_SIZE).to(
        settings.DEVICE
    )
    coder.load_state_dict(torch.load(settings.CODER_PATH, map_location=settings.DEVICE))
    coder = coder.coder
    coder.eval()
    inf = Inference(settings.DEVICE, coder)
    target = inf.predict([fen]).tolist()[0]
    conn = sqlite3.connect(settings.DATABASE)
    cursor = conn.cursor()
    cursor.execute("select Embeding FROM positions_lite")
    matrix = cursor.fetchall()
    matrix = [json.loads(x[0])[0] for x in matrix]
    scores = similarity_function(matrix, target)
    idx = find_lowest(scores, num)
    games = Games(find_game(idx, cursor))
    conn.close()
    return games
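A short, hedged usage sketch: look up the five stored games whose position embeddings are closest to the starting position (this assumes the project's settings module and the SQLite database referenced above are already configured):

start_fen = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"
similar_games = find_similar(start_fen, num=5)  # uses the default `nearest` similarity function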
Code example #3
import argparse
import os
import warnings

import torch
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.utils.data import DataLoader

# Autoencoder and ConvolutionAE are defined elsewhere in the project.


def str2bool(value):
    """Parse boolean command-line values (argparse's type=bool treats any non-empty string as True)."""
    if isinstance(value, bool):
        return value
    return value.lower() in ('true', '1', 'yes', 'y')


def main():
    parser = argparse.ArgumentParser(
        description='Simple training script for training the model')

    parser.add_argument(
        '--epochs', help='Number of epochs (default: 75)', type=int, default=75)
    parser.add_argument(
        '--batch-size', help='Batch size of the data (default: 16)', type=int, default=16)
    parser.add_argument(
        '--learning-rate', help='Learning rate (default: 0.001)', type=float, default=0.001)
    parser.add_argument(
        '--seed', help='Random seed (default: 1)', type=int, default=1)
    parser.add_argument(
        '--data-path', help='Path for the downloaded dataset (default: ../dataset/)', default='../dataset/')
    parser.add_argument(
        '--dataset', help='Dataset name. Must be one of MNIST, STL10, CIFAR10')
    parser.add_argument(
        '--use-cuda', help='CUDA usage (default: False)', type=str2bool, default=False)
    parser.add_argument(
        '--network-type', help='Type of the network layers. Must be one of Conv, FC (default: FC)', default='FC')
    parser.add_argument(
        '--weight-decay', help='weight decay (L2 penalty) (default: 1e-5)', type=float, default=1e-5)
    parser.add_argument(
        '--log-interval', help='No of batches to wait before logging training status (default: 50)', type=int, default=50)
    parser.add_argument(
        '--save-model', help='For saving the current model (default: True)', type=str2bool, default=True)

    args = parser.parse_args()

    epochs = args.epochs  # number of epochs
    batch_size = args.batch_size  # batch size
    learning_rate = args.learning_rate  # learning rate
    torch.manual_seed(args.seed)  # seed value

    # Creating dataset path if it doesn't exist
    if args.data_path is None:
        raise ValueError('Must provide dataset path')
    else:
        data_path = args.data_path
        if not os.path.isdir(data_path):
            os.makedirs(data_path, exist_ok=True)

    # Downloading proper dataset and creating data loader
    if args.dataset == 'MNIST':
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])

        train_data = torchvision.datasets.MNIST(
            data_path, train=True, download=True, transform=T)
        test_data = torchvision.datasets.MNIST(
            data_path, train=False, download=True, transform=T)

        ip_dim = 1 * 28 * 28  # input dimension
        h1_dim = int(ip_dim / 2)  # hidden layer 1 dimension
        op_dim = int(ip_dim / 4)  # bottleneck (encoded) dimension
    elif args.dataset == 'STL10':
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        train_data = torchvision.datasets.STL10(
            data_path, split='train', download=True, transform=T)
        test_data = torchvision.datasets.STL10(
            data_path, split='test', download=True, transform=T)

        ip_dim = 3 * 96 * 96  # input dimension
        h1_dim = int(ip_dim / 2)  # hidden layer 1 dimension
        op_dim = int(ip_dim / 4)  # bottleneck (encoded) dimension
    elif args.dataset == 'CIFAR10':
        T = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
        ])

        train_data = torchvision.datasets.CIFAR10(
            data_path, train=True, download=True, transform=T)
        test_data = torchvision.datasets.CIFAR10(
            data_path, train=False, download=True, transform=T)

        ip_dim = 3 * 32 * 32  # input dimension
        h1_dim = int(ip_dim / 2)  # hidden layer 1 dimension
        op_dim = int(ip_dim / 4)  # bottleneck (encoded) dimension
    elif args.dataset is None:
        raise ValueError('Must provide dataset')
    else:
        raise ValueError('Dataset name must be MNIST, STL10 or CIFAR10')

    train_loader = DataLoader(train_data, batch_size=batch_size, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=batch_size, shuffle=False)

    # use CUDA or not
    device = 'cpu'
    if args.use_cuda:
        if torch.cuda.is_available():
            device = 'cuda'
        else:
            raise ValueError('CUDA is not available, please set it False')
    elif torch.cuda.is_available():
        warnings.warn(
            'CUDA is available, please use for faster convergence')

    # Type of layer
    if args.network_type == 'FC':
        auto_encoder = Autoencoder(ip_dim, h1_dim, op_dim).to(device)
    elif args.network_type == 'Conv':
        auto_encoder = ConvolutionAE().to(device)
    else:
        raise ValueError('Network type must be either FC or Conv type')

    # Train the model
    auto_encoder.train()
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.Adam(
        lr=learning_rate, params=auto_encoder.parameters(), weight_decay=args.weight_decay)

    for n_epoch in range(epochs):  # loop over the dataset multiple times
        reconstruction_loss = 0.0
        for batch_idx, (X, Y) in enumerate(train_loader):
            X = X.view(X.size()[0], -1)
            X = Variable(X).to(device)

            encoded, decoded = auto_encoder(X)

            optimizer.zero_grad()
            loss = criterion(X, decoded)
            loss.backward()
            optimizer.step()

            reconstruction_loss += loss.item()
            if (batch_idx + 1) % args.log_interval == 0:
                print('[%d, %5d] Reconstruction loss: %.5f' %
                      (n_epoch + 1, batch_idx + 1, reconstruction_loss / args.log_interval))
                reconstruction_loss = 0.0
    if args.save_model:
        torch.save(auto_encoder.state_dict(), "Autoencoder.pth")

    # Save real images (create the output directory if it does not exist)
    os.makedirs('images', exist_ok=True)
    data_iter = iter(test_loader)
    images, labels = next(data_iter)
    torchvision.utils.save_image(torchvision.utils.make_grid(
        images, nrow=4), 'images/actual_img.jpeg')

    # Load trained model and get decoded images
    auto_encoder.load_state_dict(torch.load('Autoencoder.pth'))
    auto_encoder.eval()
    images = images.view(images.size()[0], -1)
    images = Variable(images).to(device)
    encoded, decoded = auto_encoder(images)

    # Save decoded images
    if args.dataset == 'MNIST':
        decoded = decoded.view(decoded.size()[0], 1, 28, 28)
    elif args.dataset == 'STL10':
        decoded = decoded.view(decoded.size()[0], 3, 96, 96)
    elif args.dataset == 'CIFAR10':
        decoded = decoded.view(decoded.size()[0], 3, 32, 32)
    torchvision.utils.save_image(torchvision.utils.make_grid(
        decoded, nrow=4), 'images/decoded_img.jpeg')
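The script above only assumes that Autoencoder(ip_dim, h1_dim, op_dim) is an nn.Module whose forward pass returns an (encoded, decoded) pair of flattened tensors; the project's actual definition is not shown here, but a minimal compatible sketch could look like this:

import torch.nn as nn


class Autoencoder(nn.Module):
    """Minimal fully connected autoencoder exposing the (encoded, decoded) interface used above."""

    def __init__(self, ip_dim, h1_dim, op_dim):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(ip_dim, h1_dim), nn.ReLU(),
            nn.Linear(h1_dim, op_dim))
        self.decoder = nn.Sequential(
            nn.Linear(op_dim, h1_dim), nn.ReLU(),
            nn.Linear(h1_dim, ip_dim))  # no output activation: inputs are mean/std normalized

    def forward(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return encoded, decoded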
Code example #4
def train():
    # opt, cuda, BS, Zdim, MODEL_PATH, IMAGE_PATH and MODEL_FULLPATH are
    # module-level settings defined elsewhere in this script.
    g = Generator(Zdim)
    g.apply(weights_init)
    print(g)
    # load pretrained Autoencoder
    if opt.ae:
        ae = Autoencoder()
        ae.load_state_dict(torch.load(os.path.join(MODEL_PATH, opt.ae)))

    # custom loss function
    # ==========================
    criterion = MQSymKLLoss(th=opt.threshold)
    # setup optimizer
    # ==========================
    optimizer = optim.Adam(g.parameters(), lr=opt.lr, weight_decay=opt.decay)

    z = torch.FloatTensor(BS, Zdim, 1, 1).normal_(0, 1)
    z_pred = torch.FloatTensor(64, Zdim, 1, 1).normal_(0, 1)
    # cuda
    if cuda:
        g.cuda()
        criterion.cuda()
        z, z_pred = z.cuda(), z_pred.cuda()

    z_pred = Variable(z_pred)

    if opt.ae:
        if cuda:
            ae.cuda()
        ae.eval()
    # load dataset
    # ==========================
    kwargs = dict(num_workers=1, pin_memory=True) if cuda else {}
    dataloader = DataLoader(
        datasets.MNIST(
            'MNIST',
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor()
                # transforms.Normalize((0.1307,), (0.3081,))
            ])),
        batch_size=BS,
        shuffle=True,
        **kwargs)
    N = len(dataloader)
    if opt.history:
        loss_history = np.empty(N * opt.epochs, dtype=np.float32)
    # train
    # ==========================
    for epoch in range(opt.epochs):
        loss_mean = 0.0
        for i, (imgs, _) in enumerate(dataloader):
            if cuda:
                imgs = imgs.cuda()
            imgs = Variable(imgs)

            g.zero_grad()
            # forward & backward & update params
            z.resize_(BS, Zdim, 1, 1).normal_(0, 1)
            zv = Variable(z)
            outputs = g(zv)
            if opt.ae:
                imgs_enc, _ = ae(imgs)
                out_enc, _ = ae(outputs)
                loss = criterion(out_enc, imgs_enc)
            else:
                loss = criterion(outputs, imgs)
            loss.backward()
            optimizer.step()

            loss_mean += loss.item()
            if opt.history:
                loss_history[N * epoch + i] = loss.item()
            show_progress(epoch + 1, i + 1, N, loss.item())

        print('\ttotal loss (mean): %f' % (loss_mean / N))
        # generate fake images
        vutils.save_image(g(z_pred).data,
                          os.path.join(IMAGE_PATH, '%d.png' % (epoch + 1)),
                          normalize=False)
        # normalize=True)
    # save models
    torch.save(g.state_dict(), MODEL_FULLPATH)
    # save loss history
    if opt.history:
        np.save('history' + opt.name, loss_history)
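train() relies on module-level configuration rather than arguments; below is a hedged sketch of the globals it expects. The names match those referenced above, but the concrete values are placeholders of my own, not the project's defaults.

import torch
from argparse import Namespace

BS, Zdim = 64, 100                        # batch size and latent dimension
cuda = torch.cuda.is_available()
MODEL_PATH, IMAGE_PATH = 'models', 'images'
MODEL_FULLPATH = 'models/generator.pth'
opt = Namespace(ae=None, threshold=0.1, lr=2e-4, decay=0.0,
                epochs=25, history=False, name='run0')

train()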
Code example #5
        optimizer = torch.optim.Adam(AE.parameters(), lr=0.1)

        epochs = 1000
        for i in range(epochs):
            optimizer.zero_grad()

            output = AE(data_torch)
            loss = loss_fn(output, data_torch)

            loss.backward()
            optimizer.step()

            if i % 100 == 0:
                print('Epoch {}: {:.4f}'.format(i, loss.item()))

        AE.eval()
        with torch.no_grad():
            data_transformed = AE.encode(data_torch).detach().numpy()

    else:
        raise Exception('Please specify a valid solver!')

    edges2d = []
    for e in edge_list:
        edges2d.append(data_transformed[[e[0], e[1]]])

    # check crosses
    crosses = cross(edges2d, edge_list, findall=True)
    print('{}: {} crosses'.format(method, len(crosses)))

    plt.scatter(data_transformed[:, 0],
Code example #6
            # print statistics
            total_samples = input.numel()
            running_loss += loss.detach().item() * total_samples
            time_step_count += total_samples

            i += 1
            if i % report_every == 0:  # print every report_every mini-batches
                print('[%d, %5d] loss: %.3f' %
                      (epoch, i, running_loss / time_step_count))
                running_loss = 0.0
                time_step_count = 0
            if i % (report_every * 10) == 0:
                # Periodically evaluate on a held-out subset of validation files.
                model.eval()
                with torch.no_grad():
                    total_loss = 0.
                    count = 0
                    for data in data_stream(valid_filenames[:20],
                                            shuffle=False,
                                            batch_size=32):
                        # get the inputs
                        input = torch.from_numpy(data.astype(
                            np.float32)).cuda()
                        loss = model(input)
                        # print(loss)
                        total_loss += loss.data.item()
                        count += 1
                    valid_loss = total_loss / count
                    if valid_loss < best_loss:
Code example #7
File: interface.py  Project: kazet/KFnrD2021-chess
class Main(Frame):
    def __init__(
        self,
        master,
        color_palette,
        home_path,
        pieces_name,
        pieces_path,
        arrows_path,
        images_sizes,
        selected_piece,
        pieces_padding,
        header_height=60,
        option_width=100,
        message_width=600,
        message_height=200,
        find_options_width=600,
        find_options_height=400,
    ):
        Frame.__init__(self, master, bg=color_palette[0])
        master.rowconfigure(0, weight=1)
        master.columnconfigure(0, weight=1)
        self.main = self
        self.coder_set = False
        self.home_path = home_path
        self.current_pages = 0
        self.pages_fens = ["rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1"]
        self.pieces_name = pieces_name
        self.pieces_path = pieces_path
        self.arrows_path = arrows_path
        self.images_sizes = images_sizes
        self.selected_piece = selected_piece
        self.pieces_padding = pieces_padding
        self.color_palette = color_palette
        self.entering = True
        self.follow_fen = True
        self.fen_placement = "rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR"
        self.fen_player = "w"
        self.fen_castling = "KQkq"
        self.header_height = header_height
        self.option_width = option_width
        self._create_widgets(message_width, message_height, find_options_width, find_options_height)
        self.bind("<Configure>", self._resize)
        self.winfo_toplevel().minsize(600, 600)
        self.display_fen()
        self.coder = None
        self.games = None
        self.store_games = None
        self.lichess_set = False
        self.coder_launcher = None
        self.set_fen("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR w KQkq - 0 1")
        self.set_coder(settings.CODER_PATH)

    def _create_widgets(self, message_width, message_height, find_options_width, find_options_height):
        self.board_box = BoardBox(self)
        self.option_box = Options(self, self.option_width)
        self.header = Header(self, header_height=self.header_height)
        self.pgn_box = PGNOptions(self, self.option_width)
        self.tensor_message = TensorDisplayer(self, message_width, message_height)
        self.find_option = FindOptions(self, find_options_width, find_options_height)
        self.lichess_set_option = LichessSetOptions(self, find_options_width, find_options_height)

        self.board_box.grid(row=1, column=0, sticky=N + S + E + W)
        self.option_box.grid(row=1, column=1, sticky=N + S + E + W)
        self.header.grid(row=0, column=0, columnspan=2, sticky=N + S + E + W)

        self.rowconfigure(1, weight=1)
        self.columnconfigure(0, weight=1)

    def show_lichess_options(self):
        self.lichess_set_option.place(relx=0.5, rely=0.5, anchor=CENTER)
        self.find_option.place_forget()
    
    def set_lichess(self, name, n_games):
        try:
            with urlopen("https://lichess.org/api/games/user/{}?max={}&perfType=ultraBullet,bullet,blitz,rapid,classical,correspondence".format(name, n_games)) as pgn:
                self.games = pgn_games(pgn, n_games)
                self.lichess_set_option.place_forget()
                self.header.coder_label["text"] = "Account set"
                self.follow_fen = True
                self.entering = False
                self.lichess_set = True
                self.set_fen()
                self.option_box.grid_forget()
                self.pgn_box.grid(row=1, column=1, sticky=N + S + E + W)
                self.pgn_box.set_game_number()
        except Exception:
            self.header.display_fen("Lichess Account couldn't be set", "", "")
    
    def _resize(self, event):
        """Modify padding when window is resized."""
        w, h = event.width, event.height
        self.rowconfigure(1, weight=h - self.header_height)
        self.columnconfigure(0, weight=w - self.option_width)

    def display_fen(self):
        self.header.display_fen(self.fen_placement, self.fen_player, self.fen_castling)
        if self.follow_fen:
            self.pages_fens[self.current_pages] = " ".join(
                [self.fen_placement, self.fen_player, self.fen_castling, "- 0 0"]
            )

    def set_fen(self, fen=None):
        if not self.entering:
            fen = self.games.board.fen()
            split_fen = fen.split()
            self.fen_placement = split_fen[0]
            self.fen_player = split_fen[1]
            self.fen_castling = split_fen[2]
            self.board_box.board.set_board(self.fen_placement)
            if self.follow_fen:
                self.pages_fens[self.current_pages] = fen
                self.display_fen()
            return
        try:
            a = chess.Board(fen)
            fen = a.fen()
            del a
            split_fen = fen.split()
            self.fen_placement = split_fen[0]
            self.fen_player = split_fen[1]
            self.fen_castling = split_fen[2]
            self.option_box.set_option(self.fen_player, self.fen_castling)
            self.board_box.board.set_board(self.fen_placement)
            self.pages_fens[self.current_pages] = fen
        except ValueError:
            self.header.display_fen("Incorrect fen", "", "")

    def set_coder(self, filename):
        try:
            self.coder = Autoencoder(settings.BOARD_SHAPE, settings.LATENT_SIZE).to(
                settings.DEVICE
            )
            self.coder.load_state_dict(torch.load(filename, map_location=settings.DEVICE))
            self.coder = self.coder.coder
            self.coder.eval()
            self.coder_launcher = Inference(
                settings.DEVICE,
                self.coder,
            )
            self.coder_set = True
            self.header.coder_label["text"] = "Coder Set"
            return True
        except Exception:
            return False

    def show_find_option(self):
        if self.coder_set:
            self.find_option.place(relx=0.5, rely=0.5, anchor=CENTER)
            self.lichess_set_option.place_forget()
        else:
            self.header.coder_label["text"]="Set Coder first"

    def run_coder(self, number, comparison):
        if self.coder_set:
            if self.lichess_set:
                self.store_games = self.games
            output = str(
                self.coder_launcher.predict([self.pages_fens[self.current_pages]])
            )
            self.find_option.place_forget()
            self.display_tensor(output)
            self.games = find_similar(self.pages_fens[self.current_pages], number, similarity_functions[comparison])
            self.entering = False
            self.follow_fen = False
            self.set_fen()
            self.option_box.grid_forget()
            self.pgn_box.grid(row=1, column=1, sticky=N + S + E + W)
            self.pgn_box.set_game_number()


    def exit_pgn_options(self):
        if self.lichess_set:
            if self.follow_fen:
                self.lichess_set = False
            else:
                self.games = self.store_games
                self.follow_fen = True
                self.set_fen()
                self.option_box.grid_forget()
                self.pgn_box.set_game_number()
                return
        self.pgn_box.grid_forget()
        self.option_box.grid(row=1, column=1, sticky=N + S + E + W)
        self.entering = True
        self.follow_fen = True
        self.set_fen(self.pages_fens[self.current_pages])

    def display_tensor(self, message):
        self.tensor_message.set_message(message)
        self.tensor_message.place(relx=0.5, rely=0.5, anchor=CENTER)

    def stop_display_tensor(self):
        self.tensor_message.place_forget()
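For context, a heavily hedged launch sketch for the Main frame above; every concrete value (palette colours, paths, piece names, sizes) is a placeholder of my own, and the sketch only runs inside the project, since Main pulls in BoardBox, Header, settings and the other project modules:

from tkinter import Tk

root = Tk()
app = Main(
    root,
    color_palette=["#DDDDDD", "#FFFFFF", "#2E2E2E"],  # placeholder palette
    home_path=".",                                    # placeholder paths below
    pieces_name=["K", "Q", "R", "B", "N", "P"],
    pieces_path="images/pieces/",
    arrows_path="images/arrows/",
    images_sizes=(60, 60),
    selected_piece=None,
    pieces_padding=4,
)
app.grid(row=0, column=0, sticky="nsew")
root.mainloop()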