Code example #1
import sys

import tqdm


# `apply_model` and `average_metric` are helpers defined elsewhere in the
# project this snippet comes from.
def validate_model(epoch,
                   dataset,
                   model,
                   criterion,
                   device="cpu",
                   rank=0,
                   world_size=1,
                   shifts=0,
                   split=False):
    indexes = range(rank, len(dataset), world_size)
    tq = tqdm.tqdm(indexes,
                   ncols=120,
                   desc=f"[{epoch:03d}] valid",
                   leave=False,
                   file=sys.stdout,
                   unit=" track")
    current_loss = 0
    for index in tq:
        streams = dataset[index]
        # limit to the first 15M samples (~5.7 minutes at 44.1 kHz)
        # to avoid OOM on --upsample models
        streams = streams[..., :15_000_000]
        streams = streams.to(device)
        sources = streams[1:]
        mix = streams[0]
        estimates = apply_model(model, mix, shifts=shifts, split=split)
        loss = criterion(estimates, sources)
        current_loss += loss.item() / len(indexes)
        del estimates, streams, sources

    if world_size > 1:
        current_loss = average_metric(current_loss, len(indexes))
    return current_loss
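For context, here is a minimal, hypothetical sketch of how validate_model
might be driven. The toy dataset, identity model, and stand-in apply_model
are illustrative assumptions, not part of the original project:

import torch
from torch import nn

class ToyDataset:
    # each item: (1 + n_sources, channels, samples); index 0 is the mixture
    def __len__(self):
        return 4

    def __getitem__(self, index):
        return torch.randn(5, 2, 44100)

def apply_model(model, mix, shifts=0, split=False):
    # stand-in: the real helper shifts/splits the audio before inference
    return model(mix).unsqueeze(0).expand(4, -1, -1)

loss = validate_model(epoch=0, dataset=ToyDataset(), model=nn.Identity(),
                      criterion=nn.functional.l1_loss)
print(f"validation loss: {loss:.4f}")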
Code example #2
import zipfile

import numpy as np
from keras import backend as K

# `utils`, `constant`, and `get_cnn_encoder` are project-specific helpers.
def load_encoder():
    # load pre-trained model
    K.clear_session()
    encoder, preprocess_for_model = get_cnn_encoder()

    # extract train features
    train_img_embeds, train_img_fns = utils.apply_model(
        "train2014.zip",
        encoder,
        preprocess_for_model,
        input_shape=(constant.IMG_SIZE, constant.IMG_SIZE))
    # we can download the zip from http://msvocds.blob.core.windows.net/coco2014/train2014.zip

    utils.save_pickle(train_img_embeds, "train_img_embeds.pickle")
    utils.save_pickle(train_img_fns, "train_img_fns.pickle")

    # extract validation features
    val_img_embeds, val_img_fns = utils.apply_model(
        "val2014.zip",
        encoder,
        preprocess_for_model,
        input_shape=(constant.IMG_SIZE, constant.IMG_SIZE))
    utils.save_pickle(val_img_embeds, "val_img_embeds.pickle")
    utils.save_pickle(val_img_fns, "val_img_fns.pickle")

    # sample images for learners
    def sample_zip(fn_in, fn_out, rate=0.01, seed=42):
        np.random.seed(seed)
        with zipfile.ZipFile(fn_in) as fin, zipfile.ZipFile(fn_out,
                                                            "w") as fout:
            sampled = filter(lambda _: np.random.rand() < rate, fin.infolist())
            for zinfo in sampled:
                fout.writestr(zinfo, fin.read(zinfo))

    sample_zip("train2014.zip", "train2014_sample.zip")
    sample_zip("val2014.zip", "val2014_sample.zip")
Code example #3
File: TD3.py  Project: ethanabrooks/jax-rl
def select_action(self, state):
    return apply_model(self.actor_optimizer.target, state).flatten()
Code example #4
import keras
from keras import backend as K

# `utils` and IMG_SIZE come from the surrounding project.
def get_cnn_encoder():
    # InceptionV3 backbone without its classifier head, pooled to a vector
    model = keras.applications.InceptionV3(include_top=False)
    preprocess_for_model = keras.applications.inception_v3.preprocess_input
    model = keras.models.Model(
        model.inputs,
        keras.layers.GlobalAveragePooling2D()(model.output))
    return model, preprocess_for_model


# Load pre-trained model.
K.clear_session()
encoder, preprocess_for_model = get_cnn_encoder()

# Extract train features.
train_img_embeds, train_img_fns = utils.apply_model("train2014.zip",
                                                    encoder,
                                                    preprocess_for_model,
                                                    input_shape=(IMG_SIZE,
                                                                 IMG_SIZE))
utils.save_pickle(train_img_embeds, "train_img_embeds.pickle")
utils.save_pickle(train_img_fns, "train_img_fns.pickle")

# Extract validation features.
val_img_embeds, val_img_fns = utils.apply_model("val2014.zip",
                                                encoder,
                                                preprocess_for_model,
                                                input_shape=(IMG_SIZE,
                                                             IMG_SIZE))
utils.save_pickle(val_img_embeds, "val_img_embeds.pickle")
utils.save_pickle(val_img_fns, "val_img_fns.pickle")
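As a quick sanity check, the returned encoder can be run on a single image.
The file name below is a placeholder, and the 2048-dimensional output follows
from InceptionV3's pooled final feature map:

import numpy as np
from keras.preprocessing import image

# Hypothetical single-image example; "example.jpg" is a placeholder path.
img = image.load_img("example.jpg", target_size=(IMG_SIZE, IMG_SIZE))
x = image.img_to_array(img)[np.newaxis, ...]  # (1, IMG_SIZE, IMG_SIZE, 3)
x = preprocess_for_model(x)
embedding = encoder.predict(x)  # shape (1, 2048)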

Code example #5
import gzip
import sys
from concurrent import futures

import musdb
import museval
import torch as th
import tqdm
from scipy.io import wavfile
from torch import distributed


# `apply_model` is a project helper; `eval_folder` is a pathlib.Path.
def evaluate(model,
             musdb_path,
             eval_folder,
             workers=2,
             device="cpu",
             rank=0,
             save=False,
             shifts=0,
             split=False,
             check=True,
             world_size=1):
    """
    Evaluate model using museval. Run the model
    on a single GPU, the bottleneck being the call to museval.
    """

    source_names = ["drums", "bass", "other", "vocals"]
    output_dir = eval_folder / "results"
    output_dir.mkdir(exist_ok=True, parents=True)
    json_folder = eval_folder / "results/test"
    json_folder.mkdir(exist_ok=True, parents=True)

    # we load tracks from the original musdb set
    test_set = musdb.DB(musdb_path, subsets=["test"])

    for p in model.parameters():
        p.requires_grad = False
        p.grad = None

    pendings = []
    with futures.ProcessPoolExecutor(workers) as pool:
        for index in tqdm.tqdm(range(rank, len(test_set), world_size),
                               file=sys.stdout):
            track = test_set.tracks[index]

            out = json_folder / f"{track.name}.json.gz"
            if out.exists():
                continue

            mix = th.from_numpy(track.audio).t().float()
            ref = mix.mean(dim=0)  # mono mixture
            mix = (mix - ref.mean()) / ref.std()

            estimates = apply_model(model,
                                    mix.to(device),
                                    shifts=shifts,
                                    split=split)
            estimates = estimates * ref.std() + ref.mean()

            estimates = estimates.transpose(1, 2)
            references = th.stack([
                th.from_numpy(track.targets[name].audio)
                for name in source_names
            ])
            if save:
                folder = eval_folder / "wav/test" / track.name
                folder.mkdir(exist_ok=True, parents=True)
                for name, estimate in zip(source_names, estimates):
                    wavfile.write(str(folder / (name + ".wav")), 44100,
                                  estimate.cpu().numpy())

            pendings.append((track.name,
                             pool.submit(museval.evaluate, references.numpy(),
                                         estimates.cpu().numpy())))
            del references, mix, estimates, track

        for track_name, pending in tqdm.tqdm(pendings, file=sys.stdout):
            # every job was submitted to the pool, so always resolve the future
            sdr, isr, sir, sar = pending.result()
            track_store = museval.TrackStore(win=44100,
                                             hop=44100,
                                             track_name=track_name)
            for idx, target in enumerate(source_names):
                values = {
                    "SDR": sdr[idx].tolist(),
                    "SIR": sir[idx].tolist(),
                    "ISR": isr[idx].tolist(),
                    "SAR": sar[idx].tolist()
                }

                track_store.add_target(target_name=target, values=values)

            # write the per-track scores once, after all targets are added
            json_path = json_folder / f"{track_name}.json.gz"
            with gzip.open(json_path, "w") as f:
                f.write(track_store.json.encode("utf-8"))
    if world_size > 1:
        distributed.barrier()
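Each per-track file written above is plain gzipped JSON, so results can be
inspected without museval. A small sketch, assuming museval's usual schema
(the path is a placeholder):

import gzip
import json

# Hypothetical inspection of one result file produced by evaluate().
with gzip.open("results/test/Some Track.json.gz", "rt") as f:
    scores = json.load(f)
for target in scores["targets"]:
    print(target["name"], "frames:", len(target["frames"]))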
Code example #6
File: MPO.py  Project: ethanabrooks/jax-rl
def sample_action(self, rng, state):
    mu, log_sig = apply_model(self.actor_optimizer.target, state)
    sig = jnp.exp(log_sig)  # a log std-dev is exponentiated, not abs'ed
    return mu + random.normal(rng, mu.shape) * sig
Code example #7
File: MPO.py  Project: ethanabrooks/jax-rl
def select_action(self, state):
    mu, _ = apply_model(self.actor_optimizer.target, state)
    return mu
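All three jax-rl snippets rely on an apply_model helper that is not shown in
this listing. A minimal sketch of what it might look like with Flax's legacy
optim API, where optimizer.target is the callable model (an assumption; the
project's actual helper may differ):

import jax
import jax.numpy as jnp

@jax.jit
def apply_model(model, x):
    # Call the model on a batch; promote a single state to a batch of one.
    return model(jnp.atleast_2d(x))

This batch-of-one behaviour also explains the .flatten() in the TD3
select_action snippet above.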