import traceback

def attempt_init():
    """Attempt model initialization, recording the outcome in module-level flags."""
    global init_status, init_error
    try:
        init_status = 'In Progress...'
        from model.model import init
        init()
        init_status = 'Successful'
        init_error = ''
    except Exception:
        # Keep the full traceback so callers can see why init failed.
        init_status = 'Failed'
        init_error = traceback.format_exc()
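A minimal usage sketch for the flags above (the module-level defaults and the reporting code are assumptions, not part of the original):

init_status, init_error = 'Not Started', ''

attempt_init()
if init_status == 'Failed':
    # init_error holds the formatted traceback captured in attempt_init().
    print('Initialization failed:')
    print(init_error)
else:
    print('Initialization status:', init_status)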
Example #2
import sys

def main():
    # Python 2 only: reload() re-exposes sys.setdefaultencoding(),
    # which site.py hides at startup and which was removed in Python 3.
    reload(sys)
    sys.setdefaultencoding('utf-8')
    init()
    credit()
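For comparison, a minimal Python 3 sketch of the same startup (init() and credit() are assumed to come from the surrounding module, as in the original); the reload/setdefaultencoding step disappears because str is Unicode and the default encoding is already UTF-8:

import sys

def main():
    # Python 3: no setdefaultencoding() needed; the default is UTF-8.
    assert sys.getdefaultencoding() == 'utf-8'
    init()    # assumed project initializer, as in the original snippet
    credit()  # assumed project call, as in the original snippet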
Example #3
import datetime

from model import model
from messages import input_message, output_message

if __name__ == "__main__":
    # Parameters
    cache_enabled = False
    model_debug = False

    steps = 1
    step_output = True

    # Initialize model; bind the instance to m so it does not shadow
    # the imported model class
    m = model("out/libmodel.so",
              db_path="db_test",
              cache_enabled=cache_enabled)
    m.init()

    # Setup model input
    input_data = input_message()

    print("Input:")
    print(input_data)

    # Prepare model output
    output_data = output_message()

    # Record the start time
    start_time = datetime.datetime.now()

    # Call model once per step
    for step in range(steps):
        # The loop body is truncated in the original listing; each step
        # presumably feeds input_data to the model and fills output_data.
        ...
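    # Hypothetical continuation (the original listing is truncated above):
    # report elapsed wall-clock time once the loop finishes.
    elapsed = datetime.datetime.now() - start_time
    print("Elapsed: %d usec" % (elapsed.total_seconds() * 1e6))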
Example #4
def initialize_model():
    """
    Initialize the model once before any tests run.
    """
    init()  # init is assumed to be imported from the model module under test
Example #5
import sys

from keras.callbacks import EarlyStopping, ModelCheckpoint

import data
import model


def trainModel(params):
    if params.features is None:
        assert params.vocab, "No vocab provided"
    assert params.trainData, "No trainData"
    assert params.validationData, "No validationData"
    assert params.modelFile, "No modelFile provided"

    if params.features is None:
        print("Vocab size     :", len(params.vocab))
        v_size = len(params.vocab)
    else:
        print("Features count :", params.features.len())
        v_size = params.features.len()
    print("Hidden layer   :", params.hidden)
    print("Word vec size  :", params.wordVecSize)
    print("Use GPU        :", params.gpu)
    print("Minibatches    :", params.minibatches)
    print("Models out dir :", params.modelFile)
    print("Train Data     :", params.trainData.len)
    print("Validation Data:", params.validationData.len)

    m = model.init(vocabularySize=v_size,
                   punctuationSize=len(data.PUNCTUATION_VOCABULARY),
                   hidden=params.hidden,
                   word_vector_size=params.wordVecSize,
                   optimizer=params.optimizer,
                   gpu=params.gpu,
                   use_features=params.features is not None)
    m.summary(150)
    # keras.utils.plot_model(m, 'punc.png')
    # keras.utils.plot_model(m, 'punc_full.png', show_shapes=True)

    if params.features is None:
        gen_train = data.Generator(X=params.trainData.X,
                                   y=params.trainData.y,
                                   batch_size=params.minibatches)
        gen_valid = data.Generator(X=params.validationData.X,
                                   y=params.validationData.y,
                                   batch_size=params.minibatches)
    else:
        gen_train = data.FeaturesGenerator(m_data=params.trainData,
                                           features=params.features,
                                           batch_size=params.minibatches)
        gen_valid = data.FeaturesGenerator(m_data=params.validationData,
                                           features=params.features,
                                           batch_size=params.minibatches)

    print("Training", file=sys.stderr)

    checkpoint = ModelCheckpoint(filepath=params.modelFile,
                                 monitor='loss',
                                 verbose=1,
                                 save_best_only=False,
                                 mode='min',
                                 period=1)
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1)
    callbacks = [checkpoint, es]
    if params.callback is not None:
        callbacks.insert(0, params.callback)

    # fit_generator() is the legacy Keras generator-training API
    # (newer versions accept generators directly in fit()).
    return m.fit_generator(
        generator=gen_train,
        validation_data=gen_valid,
        epochs=params.maxEpochs,
        verbose=1,
        # workers=8,
        # use_multiprocessing=True,
        callbacks=callbacks)
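A hypothetical invocation sketch: the field names mirror the attribute accesses in trainModel above, but the concrete values and the vocab/train_data/valid_data objects are placeholders, not part of the original.

from types import SimpleNamespace

params = SimpleNamespace(
    features=None,             # or an object exposing .len(), as used above
    vocab=vocab,               # required when features is None
    trainData=train_data,      # expected to expose .X, .y and .len
    validationData=valid_data,
    modelFile='models/model.h5',
    hidden=256,
    wordVecSize=300,
    optimizer='adam',
    gpu=False,
    minibatches=128,
    maxEpochs=50,
    callback=None,
)
history = trainModel(params)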