def train(model, criterion, optimizer, loader, epoch, args):
    """Train `model` for one epoch over `loader`.

    Dispatches the forward pass on `args.model` (drnet / vtranse /
    vipcnn / pprfcn), accumulates the sample-weighted loss and the
    number of true positives, and steps the optimizer per batch.

    Returns:
        (mean_loss, accuracy_percent) averaged over all samples seen.

    Raises:
        ValueError: if `args.model` is not one of the supported names.

    NOTE(review): tensors are moved with unconditional `.cuda()`, so a
    CUDA device is assumed to be available. `epoch` is unused here.
    """
    model.train()
    loss = 0.0
    acc = 0.0
    num_samples = 0
    bar = progressbar.ProgressBar(max_value=len(loader))
    for idx, data_batch in enumerate(loader):
        subj_batch_var = data_batch["subject"]["embedding"].cuda()
        obj_batch_var = data_batch["object"]["embedding"].cuda()
        predicate = data_batch["predicate"].cuda()
        label_batch_var = torch.squeeze(data_batch["label"]).cuda()
        if args.model == "drnet":
            img = data_batch["bbox_img"].cuda()
            mask_batch_var = data_batch["bbox_mask"].cuda()
            output = model(subj_batch_var, obj_batch_var, img, mask_batch_var, predicate)
        elif args.model == "vtranse":
            img = data_batch["full_img"].cuda()
            ts_batch_var = data_batch["subject"]["t"].cuda()
            to_batch_var = data_batch["object"]["t"].cuda()
            bboxs_batch_var = data_batch["subject"]["bbox"].cuda()
            bboxo_batch_var = data_batch["object"]["bbox"].cuda()
            output = model(
                subj_batch_var,
                obj_batch_var,
                img,
                ts_batch_var,
                to_batch_var,
                bboxs_batch_var,
                bboxo_batch_var,
                predicate,
            )
        elif args.model in ("vipcnn", "pprfcn"):
            img = data_batch["full_img"].cuda()
            bbox_s = data_batch["subject"]["bbox"].cuda()
            bbox_o = data_batch["object"]["bbox"].cuda()
            output = model(img, bbox_s, bbox_o, predicate)
        else:
            # Fail fast: previously an unrecognized model name left
            # `output` unbound and crashed with a confusing NameError
            # at the criterion call below.
            raise ValueError(f"unsupported model: {args.model!r}")
        loss_batch_var = criterion(output, label_batch_var)
        batch_size = len(data_batch["label"])
        # Weight the (mean) batch loss by batch size so the epoch loss
        # is a true per-sample average even with a ragged last batch.
        loss += batch_size * loss_batch_var.item()
        acc += num_true_positives(output, label_batch_var)
        num_samples += batch_size
        optimizer.zero_grad()
        loss_batch_var.backward()
        optimizer.step()
        bar.update(idx)
    loss /= num_samples
    acc /= num_samples / 100.0  # convert hit count to a percentage
    return loss, acc
def train(model, criterion, optimizer, loader, epoch, args):
    """Train `model` for one epoch over `loader`.

    Dispatches the forward pass on `args.model` (drnet / vtranse /
    vipcnn / pprfcn), accumulates the sample-weighted loss and the
    number of true positives, and steps the optimizer per batch.

    Returns:
        (mean_loss, accuracy_percent) averaged over all samples seen.

    Raises:
        ValueError: if `args.model` is not one of the supported names.

    NOTE(review): tensors are moved with unconditional `.cuda()`, so a
    CUDA device is assumed to be available. `epoch` is unused here.
    """
    model.train()
    loss = 0.
    acc = 0.
    num_samples = 0
    bar = progressbar.ProgressBar(max_value=len(loader))
    for idx, data_batch in enumerate(loader):
        subj_batch_var = data_batch['subject']['embedding'].cuda()
        obj_batch_var = data_batch['object']['embedding'].cuda()
        predicate = data_batch['predicate'].cuda()
        label_batch_var = torch.squeeze(data_batch['label']).cuda()
        if args.model == 'drnet':
            img = data_batch['bbox_img'].cuda()
            mask_batch_var = data_batch['bbox_mask'].cuda()
            output = model(subj_batch_var, obj_batch_var, img, mask_batch_var, predicate)
        elif args.model == 'vtranse':
            img = data_batch['full_img'].cuda()
            ts_batch_var = data_batch['subject']['t'].cuda()
            to_batch_var = data_batch['object']['t'].cuda()
            bboxs_batch_var = data_batch['subject']['bbox'].cuda()
            bboxo_batch_var = data_batch['object']['bbox'].cuda()
            output = model(subj_batch_var, obj_batch_var, img,
                           ts_batch_var, to_batch_var,
                           bboxs_batch_var, bboxo_batch_var, predicate)
        elif args.model in ('vipcnn', 'pprfcn'):
            img = data_batch['full_img'].cuda()
            bbox_s = data_batch['subject']['bbox'].cuda()
            bbox_o = data_batch['object']['bbox'].cuda()
            output = model(img, bbox_s, bbox_o, predicate)
        else:
            # Fail fast: previously an unrecognized model name left
            # `output` unbound and crashed with a confusing NameError
            # at the criterion call below.
            raise ValueError('unsupported model: %r' % (args.model,))
        loss_batch_var = criterion(output, label_batch_var)
        batch_size = len(data_batch['label'])
        # Weight the (mean) batch loss by batch size so the epoch loss
        # is a true per-sample average even with a ragged last batch.
        loss += batch_size * loss_batch_var.item()
        acc += num_true_positives(output, label_batch_var)
        num_samples += batch_size
        optimizer.zero_grad()
        loss_batch_var.backward()
        optimizer.step()
        bar.update(idx)
    loss /= num_samples
    acc /= (num_samples / 100.)  # convert hit count to a percentage
    return loss, acc
def train(model, criterion, optimizer, loader, epoch, args):
    """Run one training epoch; return (mean loss, accuracy in percent)."""
    model.train()
    running_loss = 0.0
    hits = 0.0
    seen = 0
    bar = progressbar.ProgressBar(max_value=len(loader))
    for step, batch in enumerate(loader):
        # Inputs are the subject/object boxes plus the predicate id;
        # labels are flattened to a column vector for the criterion.
        subj = batch["subject"]["bbox"]
        obj = batch["object"]["bbox"]
        labels = torch.squeeze(batch["label"]).view(-1, 1)
        preds = batch["predicate"]
        if torch.cuda.is_available():
            subj, obj = subj.cuda(), obj.cuda()
            labels, preds = labels.cuda(), preds.cuda()
        scores = model(subj, obj, preds)
        batch_loss = criterion(scores, labels)
        n = len(batch["label"])
        # Scale the mean batch loss by n so the epoch average is
        # per-sample even when the last batch is smaller.
        running_loss += n * batch_loss.item()
        hits += num_true_positives(scores, labels)
        seen += n
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        bar.update(step)
    return running_loss / seen, hits / (seen / 100.0)
def train(model, criterion, optimizer, loader, epoch, args):
    """Run one training epoch; return (mean loss, accuracy in percent)."""
    model.train()
    running_loss = 0.0
    hits = 0.0
    seen = 0
    bar = progressbar.ProgressBar(max_value=len(loader))
    for step, batch in enumerate(loader):
        # Inputs are the subject/object embeddings plus the predicate id.
        subj = batch['subject']['embedding']
        obj = batch['object']['embedding']
        labels = torch.squeeze(batch['label'])
        preds = batch['predicate']
        if torch.cuda.is_available():
            subj, obj = subj.cuda(), obj.cuda()
            labels, preds = labels.cuda(), preds.cuda()
        scores = model(subj, obj, preds)
        batch_loss = criterion(scores, labels)
        n = len(batch['label'])
        # Scale the mean batch loss by n so the epoch average is
        # per-sample even when the last batch is smaller.
        running_loss += n * batch_loss.item()
        hits += num_true_positives(scores, labels)
        seen += n
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()
        bar.update(step)
    return running_loss / seen, hits / (seen / 100.)