def load_audio_classifier(folder):
    print('Loading config folder: ' + folder)

    config_file, caffemodel_file, net_proto = get_filenames(folder)

    sbd.SbdConfig(config_file)
    temp_proto = make_audio_temp_deploy(folder, net_proto)

    net = caffe.Net(temp_proto, caffemodel_file, caffe.TEST)

    classifier = AudioClassifier(net)

    return classifier


def main(model_folder, example_folder):
    config_file, caffemodel_file, net_proto = get_filenames(model_folder)
    sbd.SbdConfig(config_file)
    ctm_file, pitch_file, energy_file = get_audio_files(example_folder)

    # parse ctm_file, pitch_file and energy_file
    parser = AudioParser(ctm_file, pitch_file, energy_file)
    parser.parse()

    classifier = load_audio_classifier(model_folder)

    data = classifier.predict_audio(parser)
    print(data)
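

# Hedged usage sketch: main() above expects a model folder and an example folder.
# A minimal command-line entry point could look like the function below; the argument
# names 'modelfolder' and 'examplefolder' are illustrative assumptions, not taken from
# the original script.
def _demo_audio_cli():
    import argparse
    demo_parser = argparse.ArgumentParser(
        description='classify an audio example with a trained sbd model.')
    demo_parser.add_argument('modelfolder', help='folder with config, caffemodel and deploy prototxt')
    demo_parser.add_argument('examplefolder', help='folder with ctm, pitch and energy files')
    demo_args = demo_parser.parse_args()
    main(demo_args.modelfolder, demo_args.examplefolder)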
def load_lexical_classifier(folder, vector):
    print('Loading config folder: ' + folder)

    config_file, caffemodel_file, net_proto = get_filenames(folder)

    sbd.SbdConfig(config_file)
    temp_proto = make_lexical_temp_deploy(folder, net_proto)

    net = caffe.Net(temp_proto, caffemodel_file, caffe.TEST)

    # vector is either a loaded word vector or None (debug mode); the classifier is
    # constructed the same way in both cases
    classifier = LexicalClassifier(net, vector)

    return classifier
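

# Hedged usage sketch for load_lexical_classifier(): as in the demo code further below,
# a Word2VecFile is passed in normal mode and None in debug mode. The model folder path
# is a placeholder; the vector path matches the default argument used further below.
def _demo_load_lexical(debug=False):
    vector = None if debug else Word2VecFile('demo_data/GoogleNews-vectors-negative300.bin')
    return load_lexical_classifier('models/lexical_model', vector)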
                    plain_text_instances_file.write(s.encode('utf8'))

                    # write to level db
                    level_db.write_training_instance(training_instance)

        plain_text_instances_file.close()
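
# Hedged context sketch: the indented write calls above presumably sit inside a loop over
# training instances, roughly as sketched below. Names that do not appear in the fragment
# (plain_text_path, training_instances, to_plain_text) are assumptions for illustration.
def _demo_write_instances(level_db, training_instances, plain_text_path):
    plain_text_instances_file = open(plain_text_path, 'w')
    for training_instance in training_instances:
        # serialize the instance to text (assumed helper) and keep a plain-text copy
        s = training_instance.to_plain_text()
        plain_text_instances_file.write(s.encode('utf8'))

        # write to level db
        level_db.write_training_instance(training_instance)

    plain_text_instances_file.close()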


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='create test and train datasets as a lmdb.')
    parser.add_argument('config_file', help="path to config file")
    args = parser.parse_args()

    # initialize config
    sbd.SbdConfig(args.config_file)

    # create proper name for the database
    SENTENCE_HOME = os.environ['SENTENCE_HOME']
    data_folder = "/mnt/naruto/sentence/data/"
    LEVEL_DB_DIR = "leveldbs"

    database = os.path.join(SENTENCE_HOME, LEVEL_DB_DIR,
                            sbd.SbdConfig.get_db_name_from_config(sbd.config))

    # check if database already exists
    if os.path.isdir(database):
        print("Delete existing database " + database + "? y/N")
        sys.stdout.flush()
        s = raw_input()
        if s != "Y" and s != "y":
            sys.exit()  # assumed behaviour: abort when the user does not confirm
def load_config(model_folder, model):
    default_model = os.path.join(route_folder, model_folder, model)
    config_file, caffemodel_file, net_proto = get_filenames(default_model)
    sbd.SbdConfig(config_file)
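
# Hedged usage sketch: load_config() joins the global route_folder with a model folder and
# a model name, then initializes the sbd config from that directory. 'default' is an
# assumed model directory name; LEXICAL_MODEL_FOLDER is the constant used further below.
def _demo_load_config():
    load_config(LEXICAL_MODEL_FOLDER, 'default')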
    parser.add_argument('vectorfile', help='the google news word vector', default='demo_data/GoogleNews-vectors-negative300.bin', nargs='?')
    parser.add_argument('-nd','--no-debug', help='do not use debug mode, google vector is read', action='store_false', dest='debug', default=DEBUG)
    args = parser.parse_args()

    route_folder = args.routefolder

    #### load lexical model ####

    lexical_models = get_options(route_folder, LEXICAL_MODEL_FOLDER)
    default_lexical_model = os.path.join(route_folder, LEXICAL_MODEL_FOLDER, lexical_models[0])

    # get the caffe files
    config_file, caffemodel_file, net_proto = get_filenames(default_lexical_model)

    # read the config file
    sbd.SbdConfig(config_file)

    if not args.debug:
        vector = Word2VecFile(args.vectorfile)
        lexical_classifier = load_lexical_classifier(default_lexical_model, vector)
    else:
        vector = None
        lexical_classifier = load_lexical_classifier(default_lexical_model, vector)

    #### load audio model ####

    audio_models = get_options(route_folder, AUDIO_MODEL_FOLDER)
    default_audio_model = os.path.join(route_folder, AUDIO_MODEL_FOLDER, audio_models[0])

    # get the caffe files
    config_file, caffemodel_file, net_proto = get_filenames(default_audio_model)
    def _load_config(self, model_folder):
        config_file, caffemodel_file, net_proto = get_filenames(model_folder)
        sbd.SbdConfig(config_file)
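
# Hedged context sketch: _load_config() is an instance method, so it presumably lives on a
# classifier wrapper class and is called from its constructor, roughly like this (the class
# name and constructor signature are assumptions):
#
#     class DemoClassifier(object):
#         def __init__(self, model_folder):
#             self._load_config(model_folder)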