Example no. 1
0
            # Scale the column into [0, 1] by its (max - min) range.
            # NOTE(review): tail of a per-column min-max normalization loop —
            # the function header, `min_values`/`max_values` and the masks are
            # outside this view; presumably this line is guarded against a
            # zero range. TODO confirm.
            out_data[:, col] /= max_values[col] - min_values[col]

        # Pin the masked extrema exactly: positions flagged by max_mask
        # become 1.0 and positions flagged by min_mask become 0.0.
        np.place(out_data[:, col], max_mask, 1.0)
        np.place(out_data[:, col], min_mask, 0.0)
    return out_data


if __name__ == "__main__":
    # Training configuration for the denoising autoencoder.
    epochs = 5000
    batch_size = 128

    # Autoencoder dimensions: 34 input features compressed to an
    # 8-dimensional latent code.
    input_size = 34
    latent_size = 8

    model = DAE(input_size, latent_size)
    model.to('cuda')
    # Let cuDNN benchmark and cache the fastest kernels — worthwhile
    # because the input shape is fixed across iterations.
    torch.backends.cudnn.benchmark = True

    loss_fn = nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
    # Exponential learning-rate decay: lr(t) = 0.1 * DECAY**t.
    DECAY = 0.95
    scheduler = LambdaLR(optimizer, lr_lambda=lambda t: DECAY**t)

    data = pd.read_csv("model/training_test_data.csv")
    # BUG FIX: DataFrame.sample returns a NEW shuffled frame; the original
    # code discarded the result, so the rows were never actually shuffled
    # before splitting. Assign it back (random_state keeps it reproducible).
    data = data.sample(frac=1, random_state=200)
    data = data.to_numpy()
    size = data.shape[0]

    # 70 / 20 / 10 split into training / validation / test sets.
    training_data = data[:int(0.7 * size)]
    validation_data = data[int(0.7 * size):int(0.9 * size)]
    test_data = data[int(0.9 * size):]
Example no. 2
0
                    type=str,
                    help="Where to save raw acoustic output")
# Decoder-related CLI flags (beam/greedy options, LM settings, ...) are
# presumably attached by this shared helper — TODO confirm in decoder_args.
parser = add_decoder_args(parser)
parser.add_argument('--save-output',
                    action="store_true",
                    help="Saves output of model from test")
# NOTE(review): parse_args() runs at import time, so merely importing this
# module consumes sys.argv; consider moving it under the __main__ guard
# (would require updating the block below that reads `args`).
args = parser.parse_args()

if __name__ == '__main__':
    # Inference only: disable autograd bookkeeping globally.
    torch.set_grad_enabled(False)
    device = torch.device("cuda" if args.cuda else "cpu")
    model = load_model(device, args.model_path, args.cuda)
    # Restore the pretrained denoising autoencoder from its checkpoint and
    # put it in eval mode (its exact role in the pipeline — cleaning inputs
    # vs. acoustic features — is not visible from this chunk).
    denoiser = DAE()
    denoiser.load_state_dict(
        torch.load('./models/denoiser_deepspeech_final.pth'))
    denoiser = denoiser.to(device)
    denoiser.eval()

    # Choose the CTC decoder requested on the command line: a beam-search
    # decoder backed by an external language model, or the greedy decoder.
    if args.decoder == "beam":
        # Imported lazily: the beam decoder pulls in heavier dependencies.
        from decoder import BeamCTCDecoder

        decoder = BeamCTCDecoder(model.labels,
                                 lm_path=args.lm_path,
                                 alpha=args.alpha,
                                 beta=args.beta,
                                 cutoff_top_n=args.cutoff_top_n,
                                 cutoff_prob=args.cutoff_prob,
                                 beam_width=args.beam_width,
                                 num_processes=args.lm_workers)
    elif args.decoder == "greedy":
        # NOTE(review): call is truncated here — continuation lies outside
        # this chunk.
        decoder = GreedyDecoder(model.labels,