def load_model(opt):
    """Restore a CNN checkpoint and publish it via module-level globals.

    Side effects (globals written):
        model     -- the CNN instance, moved to `device` and set to eval mode.
        device    -- CPU when opt.gpu == -1, otherwise the given CUDA device.
        idx2label -- inverse of the checkpoint vocab: index -> label string.

    Args:
        opt: options object; reads `opt.model` (checkpoint path) and
             `opt.gpu` (CUDA device index, or -1 for CPU).
    """
    global model
    global device
    global idx2label

    print('Loading checkpoint from %s' % opt.model)
    # map_location forces tensors onto CPU storage regardless of where
    # the checkpoint was saved; the model is moved to `device` afterwards.
    ckpt = torch.load(opt.model,
                      map_location=lambda storage, loc: storage)
    print('Loading vocab from checkpoint at %s' % opt.model)
    vocab = ckpt['vocab']
    # Invert the vocab mapping (label -> index) into (index -> label).
    idx2label = {index: label for label, index in vocab.items()}

    print('Building model...')
    device = torch.device("cpu") if opt.gpu == -1 else torch.device("cuda", opt.gpu)
    model = CNN(len(vocab))

    # end of patch for backward compatibility
    print("Loading model parameters from checkpoint...")
    # strict=False: tolerate missing/unexpected keys from older checkpoints.
    model.load_state_dict(ckpt['model'], strict=False)
    model.to(device)
    model.eval()
        self.errors: List[Error] = []

    def parse(self, html: bytes) -> List[Error]:
        """Collect error-feedback entries found in *html*.

        Every element matching the module-level ERROR_FEEDBACK selector is
        wrapped in an Error and appended to self.errors. Note the list is
        cumulative across calls; the full accumulated list is returned.

        Args:
            html: raw page bytes to parse with self._parser.

        Returns:
            The (accumulated) list of Error instances.
        """
        document = self._parser(html)
        matches = document.find_all(**ERROR_FEEDBACK)
        self.errors.extend(Error(match.text) for match in matches)
        return self.errors


if __name__ == "__main__":
    model = CNN()
    model.load("checkpoints/0228_ori/model.pth")
    model.cuda()
    model.eval()

    target_path = "dataset/crawled_data"
    if not os.path.exists(target_path):
        os.makedirs(target_path)
        os.makedirs(os.path.join(target_path, "ori"))

    crawled_data = []
    image_cnt = 1
    error_cnt = 1
    num_runs = 10000

    for _ in tqdm(range(num_runs)):
        client = HTTPRequest()
        error_feedback = ErrorFeedback()