Example #1
            # Rebuild the single-channel U-Net and restore its trained weights
            model = UNet(1, 1, bilinear=False)
            model.load_state_dict(model_dict["unetState"])
            # Wrap the network so the denoiser can be addressed by name later on
            model = nn.Sequential(OrderedDict([("denoiser", model)]))
            dataOpts = model_dict["dataOpts"]
            log.debug("Model successfully loaded via torch.load: " +
                      str(ARGS.model_path))

    log.info(model)

    if ARGS.visualize:
        sp = signal.signal_proc()
    else:
        sp = None

    if torch.cuda.is_available() and ARGS.cuda:
        model = model.cuda()

    model.eval()

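    # Audio / spectrogram parameters saved alongside the checkpoint at training time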
    sr = dataOpts['sr']
    fmin = dataOpts["fmin"]
    fmax = dataOpts["fmax"]
    n_fft = dataOpts["n_fft"]
    hop_length = dataOpts["hop_length"]
    n_freq_bins = dataOpts["n_freq_bins"]

    log.debug("dataOpts: " + str(dataOpts))

    sequence_len = int(ceil(ARGS.sequence_len * sr))

    hop = sequence_len
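
The last two lines convert the requested sequence length from seconds to samples and reuse it as the hop size, i.e. the audio is processed in non-overlapping windows. Below is a minimal, self-contained sketch of that chunking with a placeholder waveform and values chosen only for illustration; none of the names in it come from the original example.

import numpy as np
from math import ceil

sr = 44100                                  # sample rate in Hz
sequence_len = int(ceil(2.0 * sr))          # 2-second window, in samples
hop = sequence_len                          # hop == window -> no overlap

audio = np.zeros(5 * sr, dtype=np.float32)  # placeholder 5-second waveform
chunks = [audio[start:start + sequence_len]
          for start in range(0, len(audio), hop)]
print([len(c) for c in chunks])             # the last chunk may be shorter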
Example #2
o_train_dataloader = DataLoader(o_train_dataset,
                                batch_size=int(args.batch_size),
                                shuffle=True,
                                num_workers=0)
o_val_dataloader = DataLoader(o_val_dataset,
                              batch_size=int(args.batch_size),
                              shuffle=True,
                              num_workers=0)

# Build the networks (pretrained weights are loaded further below)
unet = UNet()
segmenter = segmenter()
domain_pred = domain_predictor(2)

if cuda:
    unet = unet.cuda()
    segmenter = segmenter.cuda()
    domain_pred = domain_pred.cuda()

# Make everything parallelisable
unet = nn.DataParallel(unet)
segmenter = nn.DataParallel(segmenter)
domain_pred = nn.DataParallel(domain_pred)

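# Note: nn.DataParallel prefixes parameter names with "module.", so the filter
# below only keeps checkpoint entries whose keys already match that naming.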
if LOAD_PATH_UNET:
    print('Loading Weights')
    encoder_dict = unet.state_dict()
    pretrained_dict = torch.load(LOAD_PATH_UNET)
    pretrained_dict = {
        k: v
        for k, v in pretrained_dict.items() if k in encoder_dict