import os
import pickle
from collections import defaultdict
from math import fabs

import torch
import torch.nn as nn

import utils


def predict(featuresPath, val_lst, classifier, val_loader, c3dWinSize,
            use_gpu):
    """Run the trained classifier on the validation/test set and collect
    the per-window sigmoid probabilities for each batch of videos."""
    val_keys = []
    predictions = []
    sigm = nn.Sigmoid()
    print("Loading validation/test features from disk...")
    # For optical-flow or HOG features, use utils.readAllOFfeatures /
    # utils.readAllHOGfeatures here instead.
    valFeatures = utils.readAllPartitionFeatures(featuresPath, val_lst)
    print("Predicting on the validation/test videos...")
    for i, (keys, seqs, labels) in enumerate(val_loader):
        # Extract the C3D feature vectors for the sampled sequences
        batchFeats = utils.getC3DFeatures(valFeatures, keys, seqs, c3dWinSize)
        # Wrap the features and labels as torch Variables/Tensors
        inputs, target = utils.make_c3d_variables(batchFeats, labels,
                                                  c3dWinSize, use_gpu)
        # output has size (BATCH x (SEQ_SIZE - c3dWinSize + 1)) x 1
        output = classifier(inputs)
        # Squash the raw scores to probabilities in (0, 1)
        pred_probs = sigm(output.view(output.size(0))).data
        val_keys.append(keys)
        predictions.append(pred_probs)
    print("Predictions done on validation/test set...")
    return val_keys, predictions
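
# --- Illustrative helper (not in the original module) ---
# A minimal sketch of how the (val_keys, predictions) pair returned by
# predict() could be turned into hard 0/1 labels per video. It assumes each
# tensor in `predictions` is flattened as BATCH x nWindows, matching the
# shape comment above; `flatten_predictions` and the 0.5 threshold are
# hypothetical, chosen only for illustration.
def flatten_predictions(val_keys, predictions, threshold=0.5):
    results = {}
    for keys, probs in zip(val_keys, predictions):
        # Reshape the flat probability vector to one row per video key
        per_key = probs.view(len(keys), -1)
        for k, row in zip(keys, per_key):
            # A key may recur across batches, so extend its label list
            results.setdefault(k, []).extend(
                int(p >= threshold) for p in row)
    return results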
def train(trainFeats, model, datasets_loader, optimizer, scheduler, criterion,
          c3dWinSize, SEQ_SIZE, nEpochs, use_gpu, base_name):
    """Train the sequence model on pre-extracted C3D features and record
    the per-epoch loss, accuracy and learning rate in `training_stats`."""
    global training_stats
    training_stats = defaultdict()
    sigm = nn.Sigmoid()
    for epoch in range(nEpochs):
        print("-" * 60)
        print("Epoch -> {}".format(epoch + 1))
        training_stats[epoch] = {}
        # Only the training phase is enabled; add 'test' here to evaluate
        # the model after each epoch.
        for phase in ['train']:
            dataset = datasets_loader[phase]
            training_stats[epoch][phase] = {}
            accuracy = 0
            net_loss = 0
            if phase == 'train':
                scheduler.step()
                model.train(True)
            elif phase == 'test':
                model.train(False)
            for i, (keys, seqs, labels) in enumerate(dataset):
                # Returns a BATCH x (SEQ_SIZE - c3dWinSize + 1) x 1 x 4096
                # batch of feature vectors
                if phase == 'train':
                    batchFeats = utils.getC3DFeatures(trainFeats, keys, seqs,
                                                      c3dWinSize)
                # elif phase == 'test':
                #     batchFeats = utils.getC3DFeatures(valFeats, keys, seqs,
                #                                       c3dWinSize)
                # Wrap the features and labels as torch Variables/Tensors
                x, y = utils.make_c3d_variables(batchFeats, labels,
                                                c3dWinSize, use_gpu)
                preds = model(x)
                preds = sigm(preds.view(preds.size(0)))
                loss = criterion(preds, y)
                # loss.item() exists from PyTorch 0.4 onwards; fall back to
                # indexing loss.data on older versions.
                if hasattr(loss, 'item'):
                    net_loss += loss.item()
                else:
                    net_loss += loss.data[0]
                # No. of feature vectors sent to the RNN per sequence is
                # SEQ_SIZE - (c3dWinSize - 1)
                accuracy += get_accuracy(preds, y,
                                         (SEQ_SIZE - (c3dWinSize - 1)))
                if phase == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
            accuracy = fabs(accuracy) / len(datasets_loader[phase].dataset)
            training_stats[epoch][phase]['loss'] = net_loss
            training_stats[epoch][phase]['acc'] = accuracy
            training_stats[epoch][phase]['lr'] = \
                optimizer.param_groups[0]['lr']
        # Display the stats at the end of each epoch
        print("Phase : Train :: Epoch : {} :: Loss : {} :: Accuracy : {} : LR : {}"
              .format(epoch + 1, training_stats[epoch]['train']['loss'],
                      training_stats[epoch]['train']['acc'],
                      optimizer.param_groups[0]['lr']))
        # Checkpoint only after the final epoch
        if ((epoch + 1) % nEpochs) == 0:
            save_model_checkpoint(base_name, model, epoch + 1, "Adam",
                                  win=SEQ_SIZE, use_gpu=use_gpu)
    # Save the stats dictionary after all the epochs
    loss_filename = os.path.join(
        base_name, "losses_GRU_c3dFC7_ep" + str(epoch + 1) + "_seq" +
        str(SEQ_SIZE) + "_Adam.pkl")
    with open(loss_filename, 'wb') as fr:
        pickle.dump(training_stats, fr, protocol=pickle.HIGHEST_PROTOCOL)
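
# --- Illustrative driver (not in the original module) ---
# A minimal sketch of how train() and predict() might be wired together.
# Every concrete value below (paths, hyper-parameters, the GRUClassifier
# class, and the train_lst/val_lst/datasets_loader objects) is an
# assumption made for illustration, not taken from the original code.
class GRUClassifier(nn.Module):
    """Hypothetical GRU over C3D fc7 features, one score per time step."""
    def __init__(self, input_size=4096, hidden_size=256):
        super(GRUClassifier, self).__init__()
        self.gru = nn.GRU(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, 1)

    def forward(self, x):
        # Assumes x arrives as BATCH x T x 1 x 4096; drop the singleton dim
        out, _ = self.gru(x.squeeze(2))
        # Flatten to (BATCH*T) x hidden and emit one raw score per step
        return self.fc(out.contiguous().view(-1, out.size(2)))


if __name__ == '__main__':
    import torch.optim as optim

    use_gpu = torch.cuda.is_available()
    c3dWinSize, SEQ_SIZE, nEpochs = 16, 30, 30      # assumed values
    featuresPath = "data/c3d_fc7_feats"             # hypothetical path
    base_name = "checkpoints"

    model = GRUClassifier()
    if use_gpu:
        model = model.cuda()
    criterion = nn.BCELoss()  # pairs with the sigmoid outputs above
    optimizer = optim.Adam(model.parameters(), lr=1e-3)
    scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)

    # train_lst, val_lst and datasets_loader are assumed to come from the
    # project's own partitioning / DataLoader code.
    trainFeats = utils.readAllPartitionFeatures(featuresPath, train_lst)
    train(trainFeats, model, datasets_loader, optimizer, scheduler,
          criterion, c3dWinSize, SEQ_SIZE, nEpochs, use_gpu, base_name)
    val_keys, predictions = predict(featuresPath, val_lst, model,
                                    datasets_loader['test'], c3dWinSize,
                                    use_gpu)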