def train():
    transform = transforms.Compose([
        transforms.Resize((356, 356)),
        transforms.RandomCrop((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    train_loader, dataset = get_loader(
        root_folder="flickr8k/images",
        annotation_file="flickr8k/captions.txt",
        transform=transform,
    )
    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    save_model = True

    # Hyperparameters
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 1
    learning_rate = 3e-4
    num_epochs = 100
    step = 0

    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    model.train()
    for epoch in range(num_epochs):
        if save_model:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": step,
            }
            save_checkpoint(checkpoint)
        for idx, (imgs, captions) in enumerate(train_loader):
            imgs = imgs.to(device)
            captions = captions.to(device)

            outputs = model(imgs, captions[:-1])  # not sending the end token
            # outputs: (seq_len, N, vocab_size) -> (seq_len * N, vocab_size);
            # targets: (seq_len, N) -> (seq_len * N)
            loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                             captions.reshape(-1))
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
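Every variant below assumes a CNNtoRNN model defined elsewhere. For reference, here is a minimal sketch of what such a model commonly looks like; the attribute names (encoderCNN, decoderRNN), the Inception-v3 backbone, and the (seq_len, batch) caption layout are assumptions, not any particular repo's code:

import torch
import torch.nn as nn
import torchvision.models as models

class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        super().__init__()
        # torchvision >= 0.13 weights API; replace the final fc layer so the
        # backbone emits an image embedding instead of class logits
        self.inception = models.inception_v3(weights="DEFAULT")
        self.inception.fc = nn.Linear(self.inception.fc.in_features, embed_size)
        self.relu = nn.ReLU()
        self.dropout = nn.Dropout(0.5)

    def forward(self, images):
        features = self.inception(images)
        # in training mode inception_v3 returns (logits, aux_logits)
        if isinstance(features, tuple):
            features = features[0]
        return self.dropout(self.relu(features))

class DecoderRNN(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers)
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.dropout = nn.Dropout(0.5)

    def forward(self, features, captions):
        embeddings = self.dropout(self.embed(captions))  # (seq_len, N, embed)
        # prepend the image embedding as the first "word"
        embeddings = torch.cat((features.unsqueeze(0), embeddings), dim=0)
        hiddens, _ = self.lstm(embeddings)
        return self.linear(hiddens)  # (seq_len + 1, N, vocab_size)

class CNNtoRNN(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers):
        super().__init__()
        self.encoderCNN = EncoderCNN(embed_size)
        self.decoderRNN = DecoderRNN(embed_size, hidden_size, vocab_size, num_layers)

    def forward(self, images, captions):
        return self.decoderRNN(self.encoderCNN(images), captions)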
def train():
    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    data_location = './flickr8k'
    train_loader, dataset = get_loader(
        root_folder=data_location + "/Images",
        annotation_file=data_location + "/captions.txt",
        transform=transform,
        num_workers=4,
    )
    torch.backends.cudnn.benchmark = True  # may give a speed boost for fixed input sizes
    # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    device = 'cpu'
    load_model = False
    save_model = False
    train_CNN = False

    # Hyperparameters
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 2
    learning_rate = 3e-4
    num_epochs = 20
    step = 0

    # initialize model, loss etc.
    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    if load_model:
        step = load_checkpoint(
            torch.load("../input/checkpoint2-epoch20/my_checkpoint2.pth.tar",
                       map_location='cpu'),
            model, optimizer)

    model.train()
    wanna_print = 100
    for epoch in range(num_epochs):
        if save_model:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": step,
            }
            save_checkpoint(checkpoint)

        for idx, (imgs, captions) in enumerate(train_loader):
            imgs = imgs.to(device)
            captions = captions.to(device)

            # Don't pass the <EOS> token as decoder input.
            outputs = model(imgs, captions[:-1])
            # The loss accepts only 2 dimensions:
            # (seq_len, N, vocabulary_size) -> (seq_len * N, vocabulary_size),
            # treating each time step as its own example.
            loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                             captions.reshape(-1))
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (idx + 1) % wanna_print == 0:
                print("Epoch: {} loss: {:.5f}".format(epoch, loss.item()))

                # generate a caption on a sample image
                model.eval()
                with torch.no_grad():
                    dataiter = iter(train_loader)
                    img, _ = next(dataiter)
                    caps = model.caption_image(img[0:1].to(device),
                                               vocabulary=dataset.vocab)
                    caption = ' '.join(caps)
                    show_image(img[0], title=caption)
                model.train()
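show_image is not defined in this snippet. A minimal sketch that un-normalizes with the same mean/std used above and displays the result via matplotlib (a hypothetical helper, not the original):

import matplotlib.pyplot as plt
import torch

def show_image(img, title=None):
    # undo Normalize(mean, std) on a CHW tensor
    mean = torch.tensor([0.485, 0.456, 0.406]).view(3, 1, 1)
    std = torch.tensor([0.229, 0.224, 0.225]).view(3, 1, 1)
    img = (img.cpu() * std + mean).clamp(0, 1)
    plt.imshow(img.permute(1, 2, 0))  # CHW -> HWC for imshow
    if title is not None:
        plt.title(title)
    plt.axis("off")
    plt.show()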
def train():
    transform = transforms.Compose([
        transforms.Resize((356, 356)),
        transforms.RandomCrop((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    train_loader, dataset = get_loader(
        "../data/flickr8k/images/",
        "../data/flickr8k/captions.txt",
        transform=transform,
    )
    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    load_model = False
    save_model = True
    train_CNN = False

    # Hyperparameters
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 1
    learning_rate = 3e-4
    num_epochs = 100

    # for tensorboard
    writer = SummaryWriter("runs/flickr")
    step = 0

    # initialize model, loss etc.
    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    # Only finetune the CNN's final fc layer unless train_CNN is set.
    for name, param in model.encoderCNN.inception.named_parameters():
        if "fc.weight" in name or "fc.bias" in name:
            param.requires_grad = True
        else:
            param.requires_grad = train_CNN

    if load_model:
        step = load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)

    model.train()
    for epoch in range(num_epochs):
        # Uncomment the line below to see a couple of test cases
        # print_examples(model, device, dataset)

        if save_model:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": step,
            }
            save_checkpoint(checkpoint)

        for idx, (imgs, captions) in tqdm(enumerate(train_loader),
                                          total=len(train_loader)):
            imgs = imgs.to(device)
            captions = captions.to(device)

            outputs = model(imgs, captions[:-1])
            loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                             captions.reshape(-1))

            writer.add_scalar("Training loss", loss.item(), global_step=step)
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
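These scripts call save_checkpoint / load_checkpoint without showing them. Minimal sketches of what they plausibly do (the default filename and the returned step counter are assumptions):

import torch

def save_checkpoint(checkpoint, filename="my_checkpoint.pth.tar"):
    # persist model/optimizer state plus the global step
    torch.save(checkpoint, filename)

def load_checkpoint(checkpoint, model, optimizer):
    model.load_state_dict(checkpoint["state_dict"])
    optimizer.load_state_dict(checkpoint["optimizer"])
    # return the step so logging continues where it left off
    return checkpoint["step"]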
def train():
    transform = transforms.Compose([
        transforms.Resize((240, 240)),
        transforms.RandomCrop((224, 224)),  # the input size of the CNN
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
    ])
    train_loader, dataset = get_loader(root_folder="archive/Images",
                                       annotation_file="archive/captions.txt",
                                       transform=transform,
                                       batch_size=128,
                                       num_workers=0)

    # Set some hyperparameters
    torch.backends.cudnn.benchmark = True  # speed up the training process
    device = torch.device("cuda" if torch.cuda.is_available() else 'cpu')
    load_model = False
    save_model = False
    train_CNN = False
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 1
    learning_rate = 3e-4
    num_epochs = 100

    # for tensorboard
    writer = SummaryWriter("runs/flickr")
    step = 0

    # initialize model, loss etc.
    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)

    # Only finetune the CNN's final fc layer unless train_CNN is set.
    for name, param in model.EncoderCNN.inception.named_parameters():
        if "fc.weight" in name or "fc.bias" in name:
            param.requires_grad = True
        else:
            param.requires_grad = train_CNN

    # no loss is computed for "<PAD>" tokens
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=learning_rate)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[60, 120, 140])

    if load_model:
        step = load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)

    model.train()
    print('Begins')

    # Overfit a single batch as a sanity check; swap in the commented-out
    # loop below to train on the full dataset.
    imgs, captions = next(iter(train_loader))
    for epoch in range(num_epochs):
        print_examples(model, device, dataset, save_path='result.txt')

        if save_model:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": step,
            }
            save_checkpoint(checkpoint)

        # loop = tqdm(enumerate(train_loader), total=len(train_loader), leave=False)
        total_loss = 0
        # for idx, (imgs, captions) in loop:
        imgs = imgs.to(device)
        captions = captions.to(device)

        # The <EOS> token is not fed to the network; we want the model to
        # learn to produce it on its own.
        outputs = model(imgs, captions[:-1])
        # outputs: (seq_len, batch_size, vocabulary_size), but the
        # cross-entropy loss accepts a 2-D tensor.
        loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                         captions.reshape(-1))
        step += 1

        optimizer.zero_grad()
        loss.backward()
        total_loss += loss.item()
        optimizer.step()
        scheduler.step()
        print(total_loss)
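The MultiStepLR schedule only takes effect when scheduler.step() is called once per epoch. A small standalone check of the schedule (default gamma=0.1): the learning rate drops tenfold at epochs 60, 120 and 140.

import torch
import torch.nn as nn
import torch.optim as optim

params = [nn.Parameter(torch.zeros(1))]  # dummy parameter for the demo
opt = optim.Adam(params, lr=3e-4)
sched = optim.lr_scheduler.MultiStepLR(opt, milestones=[60, 120, 140])
for epoch in range(150):
    opt.step()    # optimizer step first, then scheduler step
    sched.step()
    if epoch in (59, 119, 139):
        print(epoch + 1, opt.param_groups[0]["lr"])  # ~3e-5, 3e-6, 3e-7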
# Inference: load the vocabulary and a trained checkpoint, then caption an image.
device = torch.device('cpu')

# Preprocessing is assumed to match training (see the training script above).
transform = transforms.Compose([
    transforms.Resize((256, 256)),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
])

with open('./weights/vocab.pkl', 'rb') as f:  # assumed vocabulary path
    vocab = pickle.load(f)

load_model = True
embed_size = 256
hidden_size = 256
vocab_size = len(vocab)
num_layers = 2
learning_rate = 3e-4
print(len(vocab))

model_path = './weights/my_checkpoint2.pth.tar'
model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
criterion = nn.CrossEntropyLoss(ignore_index=vocab.stoi["<PAD>"])
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

if load_model:
    step = load_checkpoint(torch.load(model_path, map_location='cpu'),
                           model, optimizer)

model.eval()
# image_path = 'flickr8k/Images/54501196_a9ac9d66f2.jpg'
image_path = './test_examples/boat.jpg'
img = PIL.Image.open(image_path).convert("RGB")
img_t = transform(img)
caps = model.caption_image(img_t.unsqueeze(0).to(device), vocab)
print(' '.join(caps))
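model.caption_image is the piece doing the actual captioning. A minimal greedy-decoding sketch of it as a free function, assuming the encoderCNN/decoderRNN attribute names from the model sketch earlier (the real method lives on CNNtoRNN and may differ):

import torch

def caption_image(model, image, vocabulary, max_length=50):
    """Greedy decoding: start from the image features, feed back the argmax
    token at each step, stop at <EOS> or after max_length words."""
    model.eval()
    result = []
    with torch.no_grad():
        x = model.encoderCNN(image).unsqueeze(0)  # (1, N=1, embed_size)
        states = None
        for _ in range(max_length):
            hiddens, states = model.decoderRNN.lstm(x, states)
            logits = model.decoderRNN.linear(hiddens.squeeze(0))  # (1, vocab_size)
            predicted = logits.argmax(dim=1)
            word = vocabulary.itos[predicted.item()]
            result.append(word)
            if word == "<EOS>":
                break
            x = model.decoderRNN.embed(predicted).unsqueeze(0)
    return result

# usage: caps = caption_image(model, img_t.unsqueeze(0), vocab)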
def train():
    transform = transforms.Compose([
        transforms.Resize((356, 356)),
        transforms.RandomCrop((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    train_loader, dataset = get_loader(root_folder='flickr8k/images/',
                                       annotation_file='flickr8k/captions.txt',
                                       transform=transform,
                                       num_workers=2)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    load_model = False
    save_model = True

    # Hyperparameters
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 1
    learning_rate = 3e-4
    num_epochs = 100

    writer = SummaryWriter('logs/flickr')
    step = 0

    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi['<PAD>'])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    if load_model:
        step = load_checkpoint(torch.load('my_ckpt.pth.tar'), model, optimizer)

    model.train()
    for epoch in range(num_epochs):
        print_examples(model, device, epoch)

        if save_model:
            checkpoint = {
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'step': step,
            }
            save_checkpoint(checkpoint)

        for idx, (imgs, captions) in enumerate(train_loader):
            imgs = imgs.to(device)
            captions = captions.to(device)

            # Slice the input captions, not the output: drop <EOS> before decoding.
            outputs = model(imgs, captions[:-1])
            loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                             captions.reshape(-1))

            writer.add_scalar('loss', loss.item(), global_step=step)
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
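print_examples is likewise undefined here, and its signature varies across these scripts (this one passes the epoch, others pass the dataset). A minimal sketch of the dataset-passing variant, with placeholder test-image paths:

import PIL.Image
import torchvision.transforms as transforms

def print_examples(model, device, dataset):
    transform = transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    model.eval()
    # placeholder paths; point these at a few held-out test images
    for path in ["test_examples/dog.jpg", "test_examples/boat.jpg"]:
        img = transform(PIL.Image.open(path).convert("RGB")).unsqueeze(0)
        caption = " ".join(model.caption_image(img.to(device), dataset.vocab))
        print(f"{path}: {caption}")
    model.train()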
def train():
    file_path_cap = os.path.join(Constants.data_folder_ann,
                                 Constants.captions_train_file)
    file_path_inst = os.path.join(Constants.data_folder_ann,
                                  Constants.instances_train_file)
    coco_dataloader_train, coco_data_train = get_dataloader(
        file_path_cap, file_path_inst, "train")

    file_path_cap = os.path.join(Constants.data_folder_ann,
                                 Constants.captions_val_file)
    file_path_inst = os.path.join(Constants.data_folder_ann,
                                  Constants.instances_val_file)
    coco_dataloader_val, coco_data_val = get_dataloader(
        file_path_cap, file_path_inst, "val")

    step = 0
    best_bleu4 = 0
    epochs_since_improvement = 0

    # initialize model, loss, etc.
    model = CNNtoRNN(coco_data_train.vocab)
    model = model.to(Constants.device)
    criterion = nn.CrossEntropyLoss(
        ignore_index=coco_data_train.vocab.stoi[Constants.PAD])
    optimizer = optim.Adam(model.parameters(), lr=Hyper.learning_rate)
    #####################################################################
    if Constants.load_model:
        step = load_checkpoint(model, optimizer)

    for i in range(Hyper.total_epochs):
        model.train()  # set model to training mode
        model.decoderRNN.train()
        model.encoderCNN.train()
        epoch = i + 1
        print(f"Epoch: {epoch}")

        checkpoint = {
            "state_dict": model.state_dict(),
            "optimizer": optimizer.state_dict(),
            "step": step,
        }
        if Constants.save_model:
            save_checkpoint(checkpoint)

        for _, (imgs, captions) in tqdm(enumerate(coco_dataloader_train),
                                        total=len(coco_dataloader_train),
                                        leave=False):
            imgs = imgs.to(Constants.device)
            captions = captions.to(Constants.device)

            outputs = model(imgs, captions[:-1])  # forward pass
            vocab_size = outputs.shape[2]
            outputs1 = outputs.reshape(-1, vocab_size)
            captions1 = captions.reshape(-1)
            loss = criterion(outputs1, captions1)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        save_checkpoint_epoch(checkpoint, epoch)

        # One epoch's validation
        recent_bleu4 = validate(val_loader=coco_dataloader_val,
                                model=model,
                                criterion=criterion)

        # Check whether there was an improvement
        is_best = recent_bleu4 > best_bleu4
        best_bleu4 = max(recent_bleu4, best_bleu4)
        if not is_best:
            epochs_since_improvement += 1
            print("\nEpochs since last improvement: %d\n"
                  % epochs_since_improvement)
        else:
            epochs_since_improvement = 0
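The validate function is not shown. A minimal sketch of what it plausibly computes, assuming the model keeps its vocabulary as model.vocab, exposes a caption_image method, and that BLEU-4 comes from nltk's corpus_bleu (all assumptions):

import torch
from nltk.translate.bleu_score import corpus_bleu

def validate(val_loader, model, criterion):
    # criterion is unused in this sketch; the real function may also
    # report a validation loss.
    model.eval()
    references, hypotheses = [], []
    with torch.no_grad():
        for imgs, captions in val_loader:
            imgs = imgs.to(Constants.device)
            for i in range(imgs.size(0)):
                hypotheses.append(model.caption_image(imgs[i:i + 1], model.vocab))
                ref = [model.vocab.itos[t.item()] for t in captions[:, i]
                       if model.vocab.itos[t.item()]
                       not in (Constants.PAD, "<SOS>", "<EOS>")]
                references.append([ref])  # one reference caption per image
    return corpus_bleu(references, hypotheses)  # defaults to BLEU-4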
def train():
    transform = transforms.Compose([
        transforms.Resize((356, 356)),
        transforms.RandomCrop((299, 299)),  # the CNN takes 299 x 299 input
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    train_loader, dataset = get_loader(
        root_folder='flickr8k/images',
        annotation_file='flickr8k/captions.txt',
        transform=transform,
        num_workers=2,
    )

    # model configuration
    torch.backends.cudnn.benchmark = True
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    load_model = False
    save_model = False
    train_CNN = False

    # Hyperparameters (we can increase capacity)
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 1
    learning_rate = 3e-4
    num_epochs = 100

    # for tensorboard
    writer = SummaryWriter('runs/flickr')
    step = 0

    # initialize model, loss etc.
    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    if load_model:
        # load_checkpoint returns the step so the loss curve continues
        # where it ended
        step = load_checkpoint(torch.load('my_checkpoint.pth.tar'),
                               model, optimizer)

    model.train()
    for epoch in range(num_epochs):
        print_examples(model, device, dataset)

        if save_model:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": step,
            }
            save_checkpoint(checkpoint)

        for idx, (imgs, captions) in enumerate(train_loader):
            imgs = imgs.to(device)
            captions = captions.to(device)

            # We want the model to learn to predict the end token, so we
            # don't send <EOS> in as input.
            outputs = model(imgs, captions[:-1])
            # We predict a logit over the vocabulary at every time step of
            # every example, so outputs is (seq_len, N, vocabulary_size),
            # while the criterion only takes 2-D input and 1-D targets:
            # outputs -> (seq_len * N, vocabulary_size), targets -> (seq_len * N)
            loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                             captions.reshape(-1))

            writer.add_scalar("Training loss", loss.item(), global_step=step)
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
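To make the reshape concrete, a standalone check with made-up shapes (seq_len=20, N=32, vocab_size=1000 are illustrative values):

import torch
import torch.nn as nn

outputs = torch.randn(20, 32, 1000)         # (seq_len, N, vocab_size)
targets = torch.randint(0, 1000, (20, 32))  # (seq_len, N)
criterion = nn.CrossEntropyLoss()
# collapse to (seq_len * N, vocab_size) logits and (seq_len * N,) targets
loss = criterion(outputs.reshape(-1, outputs.shape[2]), targets.reshape(-1))
print(loss.shape)  # torch.Size([]) -- a scalar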
def train():
    transform = transforms.Compose([
        transforms.Resize((356, 356)),
        transforms.RandomCrop((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    train_loader, dataset = get_loader(
        root_folder="/mnt/liguanlin/DataSets/ImageCaptionDatasets/flickr8k/images",
        annotation_file="/mnt/liguanlin/DataSets/ImageCaptionDatasets/flickr8k/captions.txt",
        transform=transform,
        num_workers=2,
    )
    torch.backends.cudnn.benchmark = True
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    load_model = False
    save_model = True

    # Hyperparameters
    embed_size = 256
    hidden_size = 256
    vocab_size = len(dataset.vocab)
    num_layers = 1
    learning_rate = 3e-4
    num_epochs = 100

    # for tensorboard
    writer = SummaryWriter("runs/flickr")
    step = 0

    # initialize model, loss etc.
    model = CNNtoRNN(embed_size, hidden_size, vocab_size, num_layers).to(device)
    criterion = nn.CrossEntropyLoss(ignore_index=dataset.vocab.stoi["<PAD>"])
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    if load_model:
        step = load_checkpoint(torch.load("my_checkpoint.pth.tar"), model, optimizer)

    model.train()
    for epoch in range(num_epochs):
        print_examples(model, device, dataset)

        if save_model:
            checkpoint = {
                "state_dict": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "step": step,
            }
            save_checkpoint(checkpoint)

        for idx, (imgs, captions) in enumerate(train_loader):
            imgs = imgs.to(device)
            captions = captions.to(device)

            outputs = model(imgs, captions[:-1])
            loss = criterion(outputs.reshape(-1, outputs.shape[2]),
                             captions.reshape(-1))

            # record loss
            writer.add_scalar("Training loss", loss.item(), global_step=step)
            step += 1

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
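As a sanity check of the ignore_index=<PAD> choice shared by all these scripts, a small standalone demonstration (PAD_IDX=0 is an assumed pad index for the demo): positions whose target equals the pad index contribute nothing to the loss, so padded caption positions don't pull the model toward predicting <PAD>.

import torch
import torch.nn as nn

PAD_IDX = 0  # assumed pad index for this demo
logits = torch.randn(6, 10)  # 6 positions, vocabulary of 10
targets = torch.tensor([4, 2, PAD_IDX, 7, PAD_IDX, 1])
loss_all = nn.CrossEntropyLoss()(logits, targets)
loss_masked = nn.CrossEntropyLoss(ignore_index=PAD_IDX)(logits, targets)
# the masked loss averages over the 4 non-pad positions only
print(loss_all.item(), loss_masked.item())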