def _make_model(opt, batch_shape):
    # Two Dense layers, compiled with the given optimizer and MSE loss.
    ipt = Input(batch_shape=batch_shape)
    x = Dense(batch_shape[-1])(ipt)
    out = Dense(batch_shape[-1])(x)
    model = Model(ipt, out)
    model.compile(opt, 'mse')
    return model
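
A minimal usage sketch, assuming Input, Dense, and Model come from keras (or tensorflow.keras) and that a (32, 16) batch shape is acceptable:

import numpy as np

model = _make_model('adam', batch_shape=(32, 16))
x = np.random.randn(32, 16)
model.train_on_batch(x, x)  # regress the input onto itself just to exercise the graph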
Example #2
def _make_softmax_model():
    # `batch_size` and `n_classes` are assumed to be module-level
    # constants in the original test file.
    ipt = Input(batch_shape=(batch_size, 8))
    x = Dense(n_classes)(ipt)
    out = Activation('softmax')(x)

    model = Model(ipt, out)
    model.compile('adam', 'categorical_crossentropy')
    return model
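
A sketch of driving it, with assumed values for the two constants and one-hot targets to match categorical_crossentropy:

import numpy as np

batch_size, n_classes = 32, 4  # assumed values
model = _make_softmax_model()
x = np.random.randn(batch_size, 8)
y = np.eye(n_classes)[np.random.randint(0, n_classes, batch_size)]  # one-hot labels
model.train_on_batch(x, y)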
Example #3
def _make_multi_io_model():
    # Two inputs concatenated on the feature axis feed two independent
    # GRU heads, giving a two-input, two-output model.
    ipt1 = Input((40, 8))
    ipt2 = Input((40, 16))
    ipts = concatenate([ipt1, ipt2])
    out1 = GRU(6, return_sequences=True)(ipts)
    out2 = GRU(12, return_sequences=True)(ipts)

    model = Model([ipt1, ipt2], [out1, out2])
    model.compile('adam', 'mse')
    return model
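
Inputs and targets are then passed as lists; a sketch with an assumed batch of 4:

import numpy as np

model = _make_multi_io_model()
x1 = np.random.randn(4, 40, 8)
x2 = np.random.randn(4, 40, 16)
y1 = np.random.randn(4, 40, 6)   # matches GRU(6, return_sequences=True)
y2 = np.random.randn(4, 40, 12)  # matches GRU(12, return_sequences=True)
model.train_on_batch([x1, x2], [y1, y2])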
Example #4
def make_model(batch_shape, layer_kw=None):
    """Conv1D autoencoder"""
    layer_kw = layer_kw or {}  # avoid a shared mutable default argument
    dim = batch_shape[-1]
    bdim = dim // 2

    ipt = Input(batch_shape=batch_shape)
    x = Conv1D(dim, 8, activation='relu', **layer_kw)(ipt)
    x = Conv1D(bdim, 1, activation='relu', **layer_kw)(x)  # bottleneck
    out = Conv1D(dim, 8, activation='linear', **layer_kw)(x)

    model = Model(ipt, out)
    model.compile('adam', 'mse')
    return model
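
Since Conv1D defaults to padding='valid', the output is shorter than the input unless padding is overridden, which is presumably what layer_kw is for; a sketch with assumed shapes:

import numpy as np

batch_shape = (16, 64, 12)  # (batch, timesteps, channels), assumed
model = make_model(batch_shape, layer_kw={'padding': 'same'})
x = np.random.randn(*batch_shape)
model.train_on_batch(x, x)  # autoencoder: the input is its own target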
Example #5
def setUp(self):
    # Load the library containing the custom backend.
    self.library_path = get_custom_backend_library_path()
    torch.ops.load_library(self.library_path)
    # Create an instance of the test Module and lower it for
    # the custom backend.
    self.model = to_custom_backend(torch.jit.script(Model()))
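
to_custom_backend is not shown here; in PyTorch's backend tests it is typically a thin wrapper over torch._C._jit_to_backend, roughly:

import torch

def to_custom_backend(module):
    # Lower a scripted module to the backend registered as "custom_backend";
    # the method_compile_spec dict is backend-defined.
    return torch._C._jit_to_backend("custom_backend", module, {"forward": {"": ""}})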
Example #6
def audio_widget(key):
    # `key` does triple duty: widget key, button label, and the
    # Enroll/Verify mode switch below.
    username = st.text_input("Username", value="", key=key)
    uploaded_file = st.file_uploader("Choose an audio file", key=key)

    if uploaded_file is not None and len(username) > 0:
        audio_bytes = uploaded_file.read()
        st.audio(audio_bytes, format="audio/wav")

        if st.button(key):
            audio_path = store_bytes_as_wav(audio_bytes, username)
            if key == "Enroll":
                Model.enroll(audio_path, username)
            elif key == "Verify":
                st.write(Model.verify(audio_path, username))

            remove_file(audio_path)
Example #7
def _make_model(batch_shape,
                l1_reg=None,
                l2_reg=None,
                bidirectional=True,
                dense_constraint=None,
                embed_input_dim=None,
                sparse=False):
    def _make_reg(l1_reg, l2_reg):
        if l1_reg is not None and l2_reg is None:
            return l1(l1_reg)
        elif l1_reg is None and l2_reg is not None:
            return l2(l2_reg)
        elif l1_reg is not None and l2_reg is not None:
            return l1_l2(l1_reg, l2_reg)
        else:
            return None

    reg = _make_reg(l1_reg, l2_reg)

    if dense_constraint is not None:
        dense_constraint = maxnorm(dense_constraint)

    ipt = Input(batch_shape=batch_shape)
    if sparse:
        # integer token inputs -> embeddings; mask_zero treats id 0 as padding
        x = Embedding(embed_input_dim, embed_input_dim * 3 + 1,
                      mask_zero=True)(ipt)
    else:
        x = ipt
    gru = GRU(4, recurrent_regularizer=reg, bias_regularizer=reg)
    if bidirectional:
        x = Bidirectional(gru)(x)
    else:
        x = gru(x)
    x = Dense(2, kernel_regularizer=reg, kernel_constraint=dense_constraint)(x)
    if sparse:
        out = Dense(2, activation='softmax')(x)
    else:
        out = Dense(1, activation='sigmoid')(x)

    return Model(ipt, out)
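
A sketch of the sparse (embedding) variant with assumed shapes and a vocabulary of 50 token ids; note that, unlike the earlier helpers, this one leaves compilation to the caller:

model = _make_model(batch_shape=(32, 10),
                    l2_reg=0.01,
                    bidirectional=False,
                    sparse=True,
                    embed_input_dim=50)
model.compile('adam', 'categorical_crossentropy')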
Example #8
File: main.py  Project: Void-TK-57/DSP
def main_read(path):

    dataframe = pd.read_csv(path, index_col=0)

    # create model
    model = Model(40, 10, [50, 40], ["sigmoid", "relu", "softmax"], 'adam',
                  0.001, ['accuracy'], "categorical_crossentropy")

    # feature columns 0..39 plus the filename column
    features_index = [str(i) for i in range(40)] + ["file"]
    # x values (the last column carries the filename)
    x_values = dataframe[features_index].values

    # one-hot encode the digit labels
    y_values = to_categorical(dataframe["Digito"].values, 10)

    # split into train and test sets (70/30)
    x_train, x_test, y_train, y_test = train_test_split(x_values,
                                                        y_values,
                                                        train_size=0.7)

    # train on the feature columns only (drop the filename column)
    history = model.train(x_train[:, :-1], y_train, epochs=200).history

    plot_history(history)

    results = model.evaluate(x_test[:, :-1], y_test)

    y_predict = model.predict_classes(x_test[:, :-1])

    # the filename column travels with the test rows
    files = x_test[:, -1]
    print(len(files))
    print(len(y_predict))

    result = pd.DataFrame(np.stack([files, y_predict], axis=1),
                          columns=["File", "Predicao"])

    result.to_csv("predicao.csv")

    print(result)
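
Carrying the filename as the last column of x_values is what makes the [:, :-1] / [:, -1] slicing work: features and filenames stay aligned through the split, so each prediction can be joined back to its source file in predicao.csv.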
Example #9
def main():
    st.title("Speaker Verification")
    # st.beta_expander was renamed st.expander in later Streamlit releases
    enroll_menu = st.beta_expander("Enroll user", expanded=True)
    verify_menu = st.beta_expander("Verify user", expanded=False)

    with enroll_menu:
        audio_widget("Enroll")
    with verify_menu:
        audio_widget("Verify")


if __name__ == "__main__":
    model = Model()
    main()
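
As a Streamlit script, this file is launched through the Streamlit CLI (e.g. streamlit run main.py, filename assumed) rather than plain python, which is what gives the st.* calls a page to render into.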
Example #10
def verify_user_identity(audio: UploadFile = File(...),
                         username: str = Form(...)):
    # FastAPI multipart form-data handler; the route decorator
    # (e.g. @app.post(...)) is not shown in this snippet.
    path = store_audio(audio, username)
    pred = Model.verify(path, username)
    clear_audio_embeddings()
    return pred
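
Because the handler takes File and Form parameters, a client must send multipart/form-data (and the server needs the python-multipart package installed). A hypothetical client call, assuming the route is mounted at /verify:

import requests

with open("sample.wav", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/verify",
        files={"audio": f},
        data={"username": "alice"},
    )
print(resp.json())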
Example #11
def enroll_user(audio: UploadFile = File(...), username: str = Form(...)):
    path = store_audio(audio, username)
    Model.enroll(path, username)
    clear_audio_embeddings()
    return "user profile added"