def train_1epoch(_model, _train_loader, _optimizer, _loss_func, _epoch, _nb_epochs):
    """Train `_model` for one epoch over `_train_loader`.

    Args:
        _model: network to train (moved to CUDA by the caller).
        _train_loader: iterable of (data, label) batches.
        _optimizer: optimizer stepping `_model`'s parameters.
        _loss_func: criterion taking (output, target).
        _epoch: current epoch index (for the progress banner only).
        _nb_epochs: total number of epochs (for the progress banner only).

    Returns:
        (mean accuracy over batches, mean loss over batches, _model).
    """
    print('==> Epoch:[{0}/{1}][training stage]'.format(_epoch, _nb_epochs))
    accuracy_list = []
    loss_list = []
    _model.train()
    for i, (data, label) in enumerate(_train_loader):
        # BUGFIX: `label.cuda(async=True)` is a SyntaxError on Python 3.7+
        # (`async` became a keyword); `non_blocking` is the supported spelling.
        label = label.cuda(non_blocking=True)
        # `Variable` is deprecated since PyTorch 0.4 — tensors carry autograd state.
        input_var = data.cuda()
        target_var = label.long()  # label is already on the GPU
        output = _model(input_var)
        loss = _loss_func(output, target_var)
        # BUGFIX: store a detached Python float, not the graph-holding tensor,
        # so the autograd graph of every batch is not kept alive all epoch.
        loss_list.append(loss.item())
        # measure accuracy and record loss
        prec1 = accuracy(output.data, label)  # , topk=(1, 5))
        accuracy_list.append(prec1)
        # compute gradient and do SGD step
        _optimizer.zero_grad()
        loss.backward()
        _optimizer.step()
    # NOTE(review): raises ZeroDivisionError on an empty loader, as the
    # original did — callers are assumed to pass a non-empty loader.
    return float(sum(accuracy_list) / len(accuracy_list)), float(
        sum(loss_list) / len(loss_list)), _model
# NOTE(review): this redefines `train_1epoch` from earlier in the file with a
# different signature — the earlier definition is shadowed at import time.
# Rename one of them (e.g. `train_two_stage_1epoch`) to keep both callable.
def train_1epoch(_img_feature_extract_model, _dcnn_model, _train_loader,
                 _optimizer, _loss_func, _epoch, _nb_epochs):
    """Train a two-stage (frame-feature extractor + DCNN) pipeline for one epoch.

    Returns (mean accuracy, mean loss, _img_feature_extract_model, _dcnn_model).
    NOTE(review): `_dcnn_model` is set to train mode but never invoked in the
    visible code — the second stage appears unfinished.
    """
    print('==> Epoch:[{0}/{1}][training stage]'.format(_epoch, _nb_epochs))
    accuracy_list = []
    loss_list = []
    _img_feature_extract_model.train()
    _dcnn_model.train()
    # `video_name` is unused in the loop body.
    for i, (dat, label, video_name) in enumerate(_train_loader):
        label = label.cuda()
        # input_var = Variable(dat).cuda()
        target_var = Variable(label).cuda().long()
        print(dat.size())
        # NOTE(review): `batch_size` and `max_frame_num` are free names, not
        # parameters — presumably module-level globals; verify they match the
        # loader's actual batch size / clip length, or iteration will IndexError.
        for batch_idx in range(batch_size):
            for frame_idx in range(max_frame_num):
                # assumes dat is (batch, channels, frames, features) — TODO confirm.
                # `unsqueeze_` mutates the slice in place to add a batch dim.
                output = _img_feature_extract_model(
                    dat[batch_idx, :, frame_idx, :].unsqueeze_(0).cuda())
                print(output.size())
        # output = _model(input_var)
        # NOTE(review): `loss` is computed from only the LAST frame's `output`
        # (and against the whole-batch target) — looks like a bug or a stub for
        # the missing `_dcnn_model` aggregation step; confirm intent.
        loss = _loss_func(output, target_var)
        loss_list.append(loss.data)
        accuracy_list.append(accuracy(output.data, label))
        # compute gradient and do SGD step
        _optimizer.zero_grad()
        loss.backward()
        _optimizer.step()
    return float(sum(accuracy_list) / len(accuracy_list)), float(
        sum(loss_list) / len(loss_list)), _img_feature_extract_model, _dcnn_model
def train_tsn_1epoch(_model, _train_loader, _optimizer, _loss_func, _epoch, _nb_epochs):
    """Train a TSN-style model for one epoch: each batch is a list of segment
    tensors whose logits are summed, then averaged before the loss.

    Args:
        _model: network to train (moved to CUDA by the caller).
        _train_loader: iterable of (data, label) where `data` is a sequence of
            per-segment input tensors (3 segments in the original usage).
        _optimizer: optimizer stepping `_model`'s parameters.
        _loss_func: criterion taking (output, target).
        _epoch, _nb_epochs: epoch indices for the progress banner only.

    Returns:
        (mean accuracy over batches, mean loss over batches, _model).
    """
    print('==> Epoch:[{0}/{1}][training stage]'.format(_epoch, _nb_epochs))
    accuracy_list = []
    loss_list = []
    _model.train()
    for i, (data, label) in enumerate(_train_loader):
        label = label.cuda()
        target_var = label.long()  # label is already on the GPU
        # BUGFIX: the inner loop used `i`, shadowing the outer batch index.
        output = None
        for seg_idx, dat in enumerate(data):
            input_var = dat.cuda()  # `Variable` wrapper is deprecated — tensors suffice
            seg_out = _model(input_var)
            output = seg_out if output is None else output + seg_out
        # Generalized: average over the actual segment count instead of the
        # hard-coded 3 (identical behavior for the original 3-segment setup).
        loss = _loss_func(output / len(data), target_var)
        # measure accuracy and record loss (accuracy is scale-invariant, so the
        # un-averaged summed logits are fine here, as in the original)
        prec = accuracy(output.data, label)
        # BUGFIX: store a detached scalar, not the graph-holding loss tensor.
        loss_list.append(loss.item())
        accuracy_list.append(float(prec))
        # compute gradient and do SGD step
        _optimizer.zero_grad()
        loss.backward()
        _optimizer.step()
    return float(sum(accuracy_list) / len(accuracy_list)), float(
        sum(loss_list) / len(loss_list)), _model
def all_frmae_validation_epoch(_model, _val_loader, _optimizer, _loss_func, _epoch, _nb_epochs):
    """Evaluate `_model` on `_val_loader` for one epoch (no gradient updates).

    NOTE: the name's "frmae" typo is kept — it is the public interface and
    callers elsewhere depend on it.

    Args:
        _model: network to evaluate.
        _val_loader: iterable of (dat, label) validation batches.
        _optimizer: unused here; kept for signature symmetry with training.
        _loss_func: criterion taking (output, target).
        _epoch, _nb_epochs: epoch indices for the progress banner only.

    Returns:
        (mean accuracy over batches, mean loss over batches, _model).
    """
    print('==> Epoch:[{0}/{1}][validation stage]'.format(_epoch, _nb_epochs))
    batch_accs, batch_losses = [], []
    _model.eval()
    with torch.no_grad():
        for dat, label in _val_loader:
            label = label.cuda()
            inputs = Variable(dat).cuda()
            targets = Variable(label).cuda()
            # forward pass only — no optimizer step during validation
            preds = _model(inputs)
            batch_loss = _loss_func(preds, targets)
            batch_losses.append(batch_loss.data)
            batch_accs.append(accuracy(preds.data, label))
    mean_acc = float(sum(batch_accs) / len(batch_accs))
    mean_loss = float(sum(batch_losses) / len(batch_losses))
    return mean_acc, mean_loss, _model