def model_test():
    args = parse_args()
    torch.set_grad_enabled(True)
    # disable dropout and run on CPU for a deterministic check
    args.dropout = 0
    args.cuda = False

    dataset = DatasetBase(args)
    print("Data loaded.")
    x, y = dataset[0]

    model = Net(args)
    # forward the sample twice: once as-is, once with axes 0 and 1 rearranged
    result1 = model(x.type(torch.int64))
    result2 = model(rearrange(x, 0, 1).type(torch.int64))
    return result1, result2
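# A quick sanity check one might run on top of model_test(); comparing the
# two returned forward passes shows whether rearranging the input changes
# the output. The torch.allclose comparison is an illustrative assumption
# (it presumes both results are single tensors), not part of the original.
if __name__ == '__main__':
    r1, r2 = model_test()
    print("outputs match:", torch.allclose(r1, r2))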
def gaussian_correlation(self, x1, x2):
    """Compute the Gaussian kernel correlation between x1 and x2."""
    if self._hog_features:
        c = np.zeros((self.size_patch[0], self.size_patch[1]), np.float32)
        for i in range(self.size_patch[2]):
            x1aux = x1[i, :].reshape((self.size_patch[0], self.size_patch[1]))
            x2aux = x2[i, :].reshape((self.size_patch[0], self.size_patch[1]))
            caux = cv2.mulSpectrums(fftd(x1aux), fftd(x2aux), 0, conjB=True)
            caux = real(fftd(caux, True))
            c += caux
        c = rearrange(c)
    else:
        # 'conjB=True' is necessary: it correlates rather than convolves
        c = cv2.mulSpectrums(fftd(x1), fftd(x2), 0, conjB=True)
        c = fftd(c, True)
        c = real(c)
        c = rearrange(c)

    if x1.ndim == 3 and x2.ndim == 3:
        d = (np.sum(x1[:, :, 0] * x1[:, :, 0]) + np.sum(x2[:, :, 0] * x2[:, :, 0])
             - 2.0 * c) / (self.size_patch[0] * self.size_patch[1] * self.size_patch[2])
    elif x1.ndim == 2 and x2.ndim == 2:
        d = (np.sum(x1 * x1) + np.sum(x2 * x2) - 2.0 * c) / (
            self.size_patch[0] * self.size_patch[1] * self.size_patch[2])

    d = d * (d >= 0)  # clamp small negative values caused by numerical error
    d = np.exp(-d / (self.sigma * self.sigma))
    return d
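# The helpers fftd, real, and rearrange used above are not defined in this
# snippet. The sketch below shows how they typically look in OpenCV-based
# KCF ports; it is an assumption about their behaviour, not the original
# code: fftd wraps cv2.dft, real extracts the real plane of OpenCV's
# two-channel complex format, and rearrange swaps quadrants so the
# zero-frequency peak sits at the centre.
import cv2
import numpy as np

def fftd(img, backwards=False):
    # forward DFT with complex output, or scaled inverse DFT
    flags = (cv2.DFT_INVERSE | cv2.DFT_SCALE) if backwards else cv2.DFT_COMPLEX_OUTPUT
    return cv2.dft(np.float32(img), flags=flags)

def real(img):
    # real plane of an OpenCV complex image (H x W x 2)
    return img[:, :, 0]

def rearrange(img):
    # quadrant swap; equivalent to np.fft.fftshift for even-sized images
    return np.fft.fftshift(img, axes=(0, 1))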
# make sure the correct location was chosen
print(XC[iy0c:iy1c + 1, ixc:ixc + 2])
print(YC[iy0c:iy1c + 1, ixc])
print(DXC[iy0c:iy1c + 1, ixc])

# loop over hours
for hour in range(start, end + 1):
    # file to save the record to
    savefile = savedir + "rec_{:010d}.npz".format(hour)
    # skip hours that already exist on disk unless forced
    if (not os.path.isfile(savefile)) or force_overwrite:
        step = hour * 3600 // dt  # model iteration number for this hour
        print(hour, step)
        # read velocities and sea-surface height from the diagnostics
        U = rdmds(diagdir + 'trsp_3d_set1', step, rec=0, lev=0)
        V = rdmds(diagdir + 'trsp_3d_set1', step, rec=1, lev=0)
        U, V = utils.rearrange_velocities(U, V)
        h = rdmds(diagdir + 'state_2d_set1', step, rec=0)
        h = utils.rearrange(h)
        # save the subdomain to file
        np.savez(savefile,
                 h=h[iy0c:iy1c + 1, ixc:ixc + 2],
                 u=U[iy0c:iy1c + 1, ixc:ixc + 3],
                 v=V[iy0c:iy1c + 2, ixc:ixc + 2])
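# A small verification step one could append (an assumption, not part of the
# original script): reload one saved record and confirm the staggered-grid
# shapes, since u and v are saved with one extra column and row respectively.
rec = np.load(savedir + "rec_{:010d}.npz".format(start))
print(rec["h"].shape, rec["u"].shape, rec["v"].shape)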
f_fbank_train = data_dir + 'fbank/train.ark'
f_fbank_test = data_dir + 'fbank/test.ark'
f_mfcc_train = data_dir + 'mfcc/train.ark'
f_mfcc_test = data_dir + 'mfcc/test.ark'
f_train_label = data_dir + 'label/train.lab'
f_phone2phone = data_dir + 'phones/48_39.map'
f_phone2char = data_dir + '48phone_char.map'

# load phone maps
phone2phone, phone2char, phone2idx = load_phone_map(f_phone2phone, f_phone2char)

# load training data and align features with labels by utterance id
data_X, data_X_id = load_data(f_fbank_train, delimiter=' ', dtype='float32')
data_Y, data_Y_id = load_data(f_train_label, delimiter=',', dtype='str')
data_X, data_X_id = rearrange(data_X, data_X_id, data_Y_id)

# load test data
test_X, test_X_id = load_data(f_fbank_test, delimiter=' ', dtype='float32')

# map 48 phones to 39, then to indices, then to one-hot vectors
for idx in range(len(data_Y)):
    data_Y[idx] = np.vectorize(phone2phone.get)(data_Y[idx])
    data_Y[idx] = np.vectorize(phone2idx.get)(data_Y[idx])
    data_Y[idx] = np.eye(48)[data_Y[idx].reshape(-1)]

# pad every sequence to the longest length seen in train or test
max_squ_len = np.array([len(d) for d in data_X] + [len(t) for t in test_X]).max()
print('max_squ_len:{}'.format(max_squ_len))
data_X = np.array([pad(x, (max_squ_len, x.shape[1])) for x in data_X])
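# `pad` is used above but not defined here. Below is a minimal sketch of a
# zero-padding helper with the signature the snippet assumes; this is an
# assumption about its behaviour, not the original implementation.
def pad(x, shape):
    # place x in the top-left corner of a zero array of the target shape
    out = np.zeros(shape, dtype=x.dtype)
    out[:x.shape[0], :x.shape[1]] = x
    return out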