Example #1
def main(_):
    # REVIEW josephz: This paradigm was copied from inference-hack.py
    # initialize_globals()

    sample_dir = "sample"
    # Alternate test samples:
    # sample_names = ["new_test"]
    # sample_names = ["perfect_features"]
    # sample_names = ["rolling_in_the_one_more_time"]
    sample_names = ["rolling_in_the_deep"]
    post_processor = PostProcessor()
    post_processor.load_weights("weights.h5")
    for sample_name in sample_names:
        console.h1("Processing %s" % sample_name)
        console.time("total processing for " + sample_name)
        sample_path = sample_dir + "/" + sample_name

        style_path = sample_path + "/style.mp3"
        content_path = sample_path + "/content.mp3"
        stylized_img_path = sample_path + "/stylized.png"
        stylized_img_raw_path = sample_path + "/stylized_raw.png"
        stylized_audio_path = sample_path + "/stylized.mp3"
        stylized_audio_raw_path = sample_path + "/stylized_raw.mp3"

        # Read style audio to spectrograms.
        style_audio, style_sample_rate = conversion.file_to_audio(style_path)
        style_img, style_phase = conversion.audio_to_spectrogram(
            style_audio, fft_window_size=1536)

        # Read content audio to spectrograms.
        content_audio, content_sample_rate = conversion.file_to_audio(
            content_path)
        content_img, content_phase = conversion.audio_to_spectrogram(
            content_audio, fft_window_size=1536)
        stylized_img_raw, stylized_img = stylize(content_img, style_img,
                                                 content_phase, style_phase,
                                                 content_path, style_path,
                                                 post_processor)

        # Save raw stylized spectrogram and audio.
        stylized_audio_raw = conversion.amplitude_to_audio(
            stylized_img_raw,
            fft_window_size=1536,
            phase_iterations=15,
            phase=content_phase)
        conversion.image_to_file(stylized_img_raw, stylized_img_raw_path)
        conversion.audio_to_file(stylized_audio_raw, stylized_audio_raw_path)

        # Save post-processed stylized spectrogram and audio.
        stylized_audio = conversion.amplitude_to_audio(stylized_img,
                                                       fft_window_size=1536,
                                                       phase_iterations=15,
                                                       phase=content_phase)
        # np.save("stylized_img.npy", stylized_img)
        # np.save("content_phase.npy", content_phase)
        conversion.image_to_file(stylized_img, stylized_img_path)
        conversion.audio_to_file(stylized_audio, stylized_audio_path)

        console.timeEnd("total processing for " + sample_name)
        console.info("Finished processing %s; saved to %s" %
                     (sample_name, stylized_audio_path))
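For context, the `conversion` helpers used throughout these examples are not shown anywhere in this listing. Below is a rough, mono-only sketch of the round-trip they appear to perform, built on librosa; the function names, Griffin-Lim settings, and phase handling are assumptions, not the project's actual implementation.

import librosa
import numpy as np

def audio_to_spectrogram_sketch(audio, fft_window_size=1536):
    # STFT magnitude and phase, mirroring the audio_to_spectrogram calls above.
    stft = librosa.stft(audio, n_fft=fft_window_size)
    return np.abs(stft), np.angle(stft)

def amplitude_to_audio_sketch(amplitude, fft_window_size=1536,
                              phase_iterations=15, phase=None):
    # Invert a magnitude spectrogram. With a known phase and no iterations,
    # a plain inverse STFT suffices; otherwise fall back to Griffin-Lim.
    if phase is not None and phase_iterations <= 1:
        return librosa.istft(amplitude * np.exp(1j * phase))
    return librosa.griffinlim(amplitude, n_iter=phase_iterations,
                              n_fft=fft_window_size)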
Example #2
def main():
    stylized_img = np.load("stylized_img.npy")
    content_phase = np.load("content_phase.npy")
    for radius in [0.2, 0.5, 1, 1.5]:
        # Unsharp mask: sharp = 1.5 * img - 0.5 * blur(img).
        # stylized_image_sharp = unsharp_mask(stylized_img, radius=radius, amount=1)
        stylized_img_blur = cv2.GaussianBlur(stylized_img, (9, 9), radius)
        # Do not pass stylized_img as the dst argument: that overwrites the
        # source between iterations, so let addWeighted allocate the result.
        stylized_img_sharp = cv2.addWeighted(stylized_img, 1.5,
                                             stylized_img_blur, -0.5, 0)
        stylized_audio = conversion.amplitude_to_audio(stylized_img_sharp,
                                                       fft_window_size=1536,
                                                       phase_iterations=15,
                                                       phase=content_phase)
        conversion.audio_to_file(
            stylized_audio,
            "/Users/ollin/Desktop/stylized_random_phase.sharpened." +
            str(radius) + ".mp3")
        console.log("Tested radius", radius)
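The loop above is a standard unsharp mask. A dependency-light equivalent, assuming a float spectrogram array and using SciPy's `gaussian_filter` in place of `cv2.GaussianBlur`:

import numpy as np
from scipy.ndimage import gaussian_filter

def unsharp_spectrogram(img, radius=1.0, amount=0.5):
    # out = (1 + amount) * img - amount * blur(img); amount=0.5 matches the
    # 1.5 / -0.5 weights passed to cv2.addWeighted above.
    blurred = gaussian_filter(img, sigma=radius)
    sharpened = (1.0 + amount) * img - amount * blurred
    return np.clip(sharpened, 0.0, None)  # keep amplitudes non-negative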
Example #3
    # This snippet begins mid-function; the call below is reconstructed from
    # the matching calls in Example #4.
    fundamental_freqs, fundamental_amps = sst.extract_fundamental_freqs_amps(
        fundamental_mask, amplitude)
    console.time("fundamental to harmonics")
    harmonics = sst.fundamental_to_harmonics(fundamental_freqs,
                                             fundamental_amps, amplitude)
    console.timeEnd("fundamental to harmonics")
    conversion.image_to_file(harmonics, f + ".harmonics.png")

    # Pitch normalization (debug toggle)
    normalize_pitch_enabled = True
    if normalize_pitch_enabled:
        pitch_normalized_amp, pitch_normalized_phase = sst.normalize_pitch(
            amplitude, phase, fundamental_freqs, fundamental_amps)
        conversion.image_to_file(pitch_normalized_amp,
                                 f + ".pitch_normalized.png")
        console.stats(pitch_normalized_amp, "pitch_normalized_amp")
        pitch_normalized_audio = conversion.amplitude_to_audio(
            pitch_normalized_amp,
            fft_window_size=1536,
            phase_iterations=1,
            phase=pitch_normalized_phase,
        )
        conversion.audio_to_file(pitch_normalized_audio,
                                 f + ".pitch_normalized.mp3")

    fundamental_audio = conversion.amplitude_to_audio(fundamental_mask,
                                                      fft_window_size=1536,
                                                      phase_iterations=1,
                                                      phase=phase)
    conversion.audio_to_file(fundamental_audio, f + ".fundamental.mp3")

    harmonics_audio = conversion.amplitude_to_audio(harmonics,
                                                    fft_window_size=1536,
                                                    phase_iterations=1,
                                                    phase=phase)
    # Save the harmonics audio as well; the snippet is truncated here, and
    # this write mirrors the .fundamental.mp3 call above.
    conversion.audio_to_file(harmonics_audio, f + ".harmonics.mp3")
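`fundamental_to_harmonics` itself is never defined in these snippets. A minimal sketch of the likely idea, assuming `fundamental_freqs` holds a fundamental bin index per frame and `fundamental_amps` its amplitude (an illustration, not the project's implementation):

import numpy as np

def fundamental_to_harmonics_sketch(fundamental_freqs, fundamental_amps,
                                    amplitude, num_harmonics=16):
    # Paint the fundamental's amplitude at integer multiples of its bin.
    num_bins, num_frames = amplitude.shape[:2]
    harmonics = np.zeros((num_bins, num_frames))
    for t in range(num_frames):
        f0 = fundamental_freqs[t]
        if f0 <= 0:
            continue
        for k in range(1, num_harmonics + 1):
            bin_k = int(round(k * f0))
            if bin_k >= num_bins:
                break
            harmonics[bin_k, t] = fundamental_amps[t]
    return harmonics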
Example #4
def stylize(content, style, content_phase, style_phase, content_path,
            style_path, post_processor):
    stylized = content
    # Pitch fundamental extraction
    console.time("extracting fundamentals")
    content_fundamental_mask = extract_fundamental(content)
    style_fundamental_mask = extract_fundamental(style)
    console.timeEnd("extracting fundamentals")
    console.time("fundamental freqs and amps")
    content_fundamental_freqs, content_fundamental_amps = extract_fundamental_freqs_amps(
        content_fundamental_mask, content)
    style_fundamental_freqs, style_fundamental_amps = extract_fundamental_freqs_amps(
        style_fundamental_mask, style)
    console.timeEnd("fundamental freqs and amps")

    normalize_pitch_enabled = True  # debug toggle from the original code
    if normalize_pitch_enabled:
        console.time("pitch normalization")
        content_normalized, content_normalized_phase = normalize_pitch(
            content,
            content_phase,
            content_fundamental_freqs,
            content_fundamental_amps,
            base_pitch=32)
        style_normalized, style_normalized_phase = normalize_pitch(
            style,
            style_phase,
            style_fundamental_freqs,
            style_fundamental_amps,
            base_pitch=32)
        content_normalized_path = content_path + ".normalized.mp3"
        content_normalized_audio = conversion.amplitude_to_audio(
            content_normalized,
            fft_window_size=1536,
            phase_iterations=1,
            phase=content_normalized_phase)
        conversion.audio_to_file(content_normalized_audio,
                                 content_normalized_path)

        style_normalized_path = style_path + ".normalized.mp3"
        style_normalized_audio = conversion.amplitude_to_audio(
            style_normalized,
            fft_window_size=1536,
            phase_iterations=1,
            phase=style_normalized_phase)
        conversion.audio_to_file(style_normalized_audio, style_normalized_path)

        console.timeEnd("pitch normalization")

    # Featurization: hand-crafted spectral features or neural features.
    use_spectral_features = False
    if use_spectral_features:
        content_features = compute_features(content)
        style_features = compute_features(style)
    else:
        # Neural features
        content_features = get_feature_array(content_path)
        content_features /= content_features.max()
        #console.stats(content_features, "content features")
        # conversion.image_to_file(content_features[:,:,np.newaxis], "content_features.png")
        #console.debug(content.shape, "content.shape")
        content_features = resize(
            content_features, (content_features.shape[0], content.shape[1]))
        style_features = get_feature_array(style_path)
        style_features /= style_features.max()
        #console.stats(style_features, "style features")
        #console.debug(style.shape, "style.shape")
        # conversion.image_to_file(style_features[:,:,np.newaxis], "style_features.png")
        style_features = resize(style_features,
                                (style_features.shape[0], style.shape[1]))

    # Harmonic recovery
    content_harmonics = fundamental_to_harmonics(content_fundamental_freqs,
                                                 content_fundamental_amps,
                                                 content)
    content_harmonics = grey_dilation(content_harmonics, size=3)
    content_harmonics *= content.max() / content_harmonics.max()
    # Sibilant recovery
    content_sibilants = get_sibilants(content, content_fundamental_amps)
    content_sibilants *= content.max() / content_sibilants.max()

    # Patchmatch
    console.time("patch match")
    use_patch_rescale = False  # debug toggle: rescale vs. full patch match
    if use_patch_rescale:
        stylized = audio_patch_rescale(
            content,
            style,
            content_fundamental_freqs,
            style_fundamental_freqs,
            content_features,
            style_features,
            content_harmonics,
            content_sibilants,
        )
    else:
        stylized = audio_patch_match(content,
                                     style,
                                     content_fundamental_freqs,
                                     style_fundamental_freqs,
                                     content_features,
                                     style_features,
                                     iterations=96)
    console.timeEnd("patch match")
    console.log("normal stylized has shape", stylized.shape)
    # ipdb.set_trace()
    stylized_post_processed = post_processor.predict_unstacked(
        amplitude=np.mean(stylized, axis=2),
        harmonics=np.mean(content_harmonics, axis=2),
        sibilants=np.mean(content_sibilants, axis=2))
    stylized_post_processed = np.dstack([
        stylized_post_processed, stylized_post_processed
    ])  # TODO: actually run the network on both channels instead of doing this
    stylized_post_processed = global_eq_match(stylized_post_processed, style)
    return stylized, stylized_post_processed
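`global_eq_match` is likewise only called, never defined, in these examples. One plausible reading, sketched under the assumption of (freq, time, channels) arrays: scale each frequency bin so the stylized result's long-term average spectrum matches the style's.

import numpy as np

def global_eq_match_sketch(stylized, style, eps=1e-4, max_gain=10.0):
    # Per-frequency gain mapping the time-averaged spectrum of `stylized`
    # onto that of `style`.
    stylized_env = stylized.mean(axis=1, keepdims=True)
    style_env = style.mean(axis=1, keepdims=True)
    gain = np.clip(style_env / (stylized_env + eps), 0.0, max_gain)
    return stylized * gain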
Example #5
console.stats(content_sibilants, "content sibilants")
content_harmonics *= content_amplitude.max() / content_harmonics.max()
console.stats(content_harmonics, "content harmonics")
console.timeEnd("super resolution")

console.time("frequency weighting")
# ELEMENT 1: Frequency weighting
for t in range(num_timesteps):
    content_slice = np.maximum(
        content_amplitude[:, t],
        np.maximum(content_harmonics[:, t], content_sibilants[:, t]))
    style_slice = style_amplitude[:, t, :]
    content_env = sst.spectral_envelope(content_slice)
    style_env = sst.spectral_envelope(style_slice)
    weights = np.clip(style_env / (0.001 + content_env), 0, 5)
    stylized_amplitude[:, t, :] = content_slice * weights[:, np.newaxis]
    # amplitude correction
    stylized_amplitude[:, t, :] *= np.clip(
        content_amplitude[:, t].max() /
        (stylized_amplitude[:, t, :].max() + 0.001), 0, 10)

console.timeEnd("frequency weighting")
stylized_audio = conversion.amplitude_to_audio(stylized_amplitude,
                                               fft_window_size=1536,
                                               phase_iterations=1,
                                               phase=content_phase)
conversion.image_to_file(stylized_amplitude,
                         test_content_file + ".stylized-cheat.jpg")
conversion.audio_to_file(stylized_audio,
                         test_content_file + ".stylized-cheat.mp3")
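The weighting loop hinges on `sst.spectral_envelope`, which is also not shown. A common definition that would fit the usage above (an assumption, not confirmed by the source): average away the channel axis, then smooth the magnitudes across frequency.

import numpy as np

def spectral_envelope_sketch(frame, smoothing_bins=21):
    # Channel-averaged magnitudes smoothed with a moving average over
    # frequency bins, giving a coarse per-frame envelope.
    mag = frame if frame.ndim == 1 else frame.mean(axis=-1)
    kernel = np.ones(smoothing_bins) / smoothing_bins
    return np.convolve(mag, kernel, mode="same")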