# Evaluate the GMM-HMM classifier on this file's data.
# Original note says `gmm_mdl_out` is shaped data_size * nclasses, but the
# argmax over axis 0 (and the matching dim=0 argmax for NVP below) suggests
# (nclasses, data_size) with one column per sample — TODO confirm.
# +1 converts the 0-based argmax index to a 1-based class label.
gmm_mdl_class_hat = np.argmax(gmm_mdl_out, axis=0) + 1
istrue_gmm_mdl = gmm_mdl_class_hat == int(true_class)
print("GMM-HMM -- ", data_file, "Done ...",
      "{}/{}".format(str(istrue_gmm_mdl.sum()),
                     str(istrue_gmm_mdl.shape[0])))

#######################################################
# Get the predicted class values for NVP-HMM
#######################################################
# Zero-pad all sequences to the longest length so they batch together.
max_len_ = max(l)
x_padded = pad_data(X, max_len_)
# NOTE(review): shuffle=True at evaluation time only reorders samples; the
# correct-count printed below is order-invariant, so the result is unaffected.
batchdata = DataLoader(
    dataset=TheDataset(x_padded,
                       lengths=l,
                       device=nvp_mdl_loaded.hmms[0].device),
    batch_size=batch_size_,
    shuffle=True)
# One forward pass per batch, concatenated along the sample dimension (dim=1),
# then per-sample argmax over the class dimension (dim=0), 1-based labels.
nvp_mdl_out_list = [nvp_mdl_loaded.forward(x) for x in batchdata]
nvp_mdl_out = torch.cat(nvp_mdl_out_list, dim=1)
nvp_mdl_class_hat = torch.argmax(nvp_mdl_out, dim=0) + 1
istrue_nvp_mdl = nvp_mdl_class_hat == int(true_class)
print("NVP-HMM -- ", data_file, "Done ...",
      "{}/{}".format(str(istrue_nvp_mdl.sum().cpu().numpy()),
                     str(istrue_nvp_mdl.shape[0])))
# NOTE(review): this chunk is whitespace-mangled and begins MID-STATEMENT —
# `get_freer_gpu())))` is the truncated tail of the expression that selects a
# CUDA device, and the `break`/`except:`/`continue` belong to a try/retry loop
# whose opening lines are outside this view. The code is therefore kept
# byte-identical. What the visible part does: push `mdl` to the chosen device
# (break on success, retry on failure), then zero-pad `xtrain` to the longest
# sequence, build a shuffled DataLoader for EM training, record the training
# set size on the model, and switch the model into train mode.
# NOTE(review): the failure message "Push to device cuda:{} fail, try again ..."
# contains a `{}` placeholder but no .format() argument, so it prints the
# braces literally — looks like a bug; also the bare `except:` swallows
# KeyboardInterrupt/SystemExit — `except Exception:` would be safer. Neither
# can be fixed here without the missing enclosing lines.
get_freer_gpu()))) print("Try to push to device: {}".format(device)) mdl.device = device mdl.pushto(mdl.device) break except: # if push error (maybe memory overflow, try again) print("Push to device cuda:{} fail, try again ...") continue print("epoch:{}\tclass:{}\tPush model to {}. Done.".format( epoch_str, iclass_str, mdl.device), file=sys.stdout) # zero pad data for batch training max_len_ = max([x.shape[0] for x in xtrain]) xtrain_padded = pad_data(xtrain, max_len_) traindata = DataLoader(dataset=TheDataset(xtrain_padded, lengths=l, device=mdl.device), batch_size=options["Train"]["batch_size"], shuffle=True) # niter counts the number of em steps before saving a model checkpoint niter = options["Train"]["niter"] # add number of training data in model mdl.number_training_data = len(xtrain) # set model into train mode mdl.train()