Example #1
0
def train_model(args):
    """
    Trains the model with the input data specified in the CLI arguments followed by an evaluation of the model.

    Args:
        args: The object that contains all the parsed CLI arguments. 
    """

    print(colored("\nReading input data...", "green"))
    training_files = []
    model = None

    if os.path.isdir(args.training_file_path):
        training_dir = args.training_file_path
        for f in os.listdir(training_dir):
            if f.endswith(".lsvm"):
                training_files.append(os.path.join(training_dir, f))

        for idx, train_file in enumerate(training_files):
            args.training_file_path = train_file
            input_dict = read_input_data("train", args)

            if idx == 0:
                if args.model_type == "et":
                    model = ExtraTreesModel()
                elif (args.model_type == "mlp"):
                    model = MlpModel()
                elif (args.model_type == "cnn"):
                    model = CnnModel(input_dict)

                model.build_new_model()
            else:
                model.preprocess_input(input_dict)

            training_dict = model.train(input_dict)
            print_training_report(training_dict)

    else:
        input_dict = read_input_data("train", args)

        if args.model_type == "et":
            model = ExtraTreesModel()
        elif (args.model_type == "mlp"):
            model = MlpModel()
        elif (args.model_type == "cnn"):
            model = CnnModel(input_dict)
        elif (args.model_type == "rnn"):
            model = GruModel(input_dict)

        model.build_new_model()
        training_dict = model.train(input_dict)
        print_training_report(training_dict)

    testing_dict = model.test(input_dict)

    print_testing_report(testing_dict)

    model.save_model(args.output_model_path)
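
# A minimal sketch, assuming argparse, of how the `args` namespace consumed by
# train_model above could be built. The flag names and defaults are assumptions
# inferred from the attributes the function reads (training_file_path,
# model_type, output_model_path); the real CLI wiring is not shown in these
# examples.
import argparse


def build_train_args(argv=None):
    parser = argparse.ArgumentParser(
        description="Train a model on .lsvm input data and save it.")
    parser.add_argument("training_file_path",
                        help="A .lsvm file or a directory containing .lsvm files.")
    parser.add_argument("--model-type", dest="model_type",
                        choices=["et", "mlp", "cnn", "rnn"], default="et",
                        help="Which model class to instantiate.")
    parser.add_argument("--output-model-path", dest="output_model_path",
                        default="trained_model",
                        help="Path where the trained model is written.")
    return parser.parse_args(argv)


# Hypothetical usage:
# train_model(build_train_args(["data/train/", "--model-type", "cnn"]))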
Example #2
0
def predict(args):
    """
    Loads the selected model and runs it on the roads read through the C
    library interface, writing the predictions back through the same interface.

    Args:
        args: The object that contains all the parsed CLI arguments.
    """

    from c_py_interface.c_interface import CLibInterface

    if args.model_type == "et":
        model = ExtraTreesModel()
    elif (args.model_type == "mlp"):
        model = MlpModel()
    elif (args.model_type == "cnn"):
        model = CnnModel()

    model.load_model(args.model_path)

    clib = CLibInterface(args.c_library)
    clib.open_file(args.prediction_file_path)

    dict_input = {}
    dict_input['softmax'] = args.use_softmax_output
    dict_input['prediction'] = {}

    # Read batches until read_next() returns a negative count, predict on each
    # entry's roads and write the predictions back through the C library.
    while True:
        N = clib.read_next()

        if N < 0:
            break

        for i in range(N):
            k = clib.count_roads(i)
            if k > 0:
                roads = clib.read_roads(k, i)
                dict_input['prediction']['data'] = roads
                pred = model.predict(dict_input)
                preds = pred['predictions']
                clib.write_roads(preds, i)
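
# A hedged sketch of the interface contract that predict() above relies on,
# inferred only from the calls it makes (open_file, read_next, count_roads,
# read_roads, write_roads). The real CLibInterface wraps a C shared library;
# this in-memory stand-in is an assumption, useful only for exercising the
# prediction loop without the native library.
class FakeCLibInterface:
    def __init__(self, batches):
        # batches: one list of road arrays per read_next() call
        self._batches = list(batches)
        self._current = []

    def open_file(self, path):
        # The real implementation opens `path` through the C library.
        pass

    def read_next(self):
        # Return the number of entries in the next batch, or a negative value
        # once everything has been consumed (this mirrors the loop's exit test).
        if not self._batches:
            return -1
        self._current = self._batches.pop(0)
        return len(self._current)

    def count_roads(self, i):
        return len(self._current[i])

    def read_roads(self, k, i):
        return self._current[i][:k]

    def write_roads(self, preds, i):
        print("entry", i, "->", preds)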
Example #3
0
def test_model(args):
    """
    Evaluates the model with the input data specified in the CLI arguments.

    Args:
        args: The object that contains all the parsed CLI arguments. 
    """

    print(colored("\nReading input data...", "green"))
    input_dict = read_input_data("test", args)

    model = None

    if (args.model_type == "et"):
        model = ExtraTreesModel()
    elif (args.model_type == "mlp"):
        model = MlpModel()
    elif (args.model_type == "cnn"):
        model = CnnModel(input_dict)
    elif (args.model_type == "rnn"):
        model = GruModel(input_dict)

    model.load_model(args.model_path)

    testing_dict = model.test(input_dict)

    print_testing_report(testing_dict)
Example #4
0
def train_model(args):
    """
    Trains the model with the input data specified in the CLI arguments followed by an evaluation of the model.

    Args:
        args: The object that contains all the parsed CLI arguments. 
    """

    print(colored("\nReading input data...", "green"))
    input_dict = read_input_data("train", args)

    model = None

    if args.model_type == "et":
        model = ExtraTreesModel()
    elif (args.model_type == "mlp"):
        model = MlpModel()
    elif (args.model_type == "cnn"):
        model = CnnModel(in_dict=input_dict)

    model.build_new_model()
    training_dict = model.train(input_dict)

    testing_dict = model.test(input_dict)

    print_training_report(training_dict)
    print_testing_report(testing_dict)

    model.save_model(args.output_model_path)
def parameter_search_GLOBAL(paddVal, numConvLayers, numFilters, kernelSize,
                            numFcLayers, neurons, batch, epochs, dropRate,
                            learnRate, classificType, aaSize, modelNum):
    for numConvL in numConvLayers:
        for numFcL in numFcLayers:
            for kernel in kernelSize:
                for learn in learnRate:
                    # for classif in classificType:
                    # For 1 conv layer this evaluates 2 different filter counts,
                    # for 2 layers 4 different combinations, and for 3 layers
                    # 8 different filter combinations.
                    for i in range(0, 2**numConvL):
                        filters = random.sample(numFilters, k=numConvL)
                        for j in range(0, 2**numFcL):
                            neur = random.sample(neurons, k=numFcL)
                            for drop in dropRate:
                                print('\nWith Dropout:')
                                path = "models/savedModels/model_" + str(modelNum) + ".h5"
                                modelName = (str(modelNum) + '_GLOBAL_Conv:' + str(filters)
                                             + '-' + str(kernel) + "x" + str(kernel)
                                             + "_FC:" + str(neur)
                                             + "_Drop:" + str(drop)
                                             + "_Learn:" + str(learn))
                                modelNum += 1
                                print(modelName)
                                model = CnnModel(paddVal, numConvL, filters,
                                                 kernel, [], numFcL, neur,
                                                 batch, epochs, drop, learn,
                                                 'FC', path, aaSize, 'global',
                                                 'yes', 'fc')
                                save_results([modelName] + model.results)
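
# A small sanity-check helper (an assumption, not part of the original code)
# that counts how many models parameter_search_GLOBAL would train: for every
# conv-layer count, FC-layer count, kernel size and learning rate, the loops
# draw 2**numConvL filter samples and 2**numFcL neuron samples and then iterate
# over every dropout rate.
def count_global_configs(numConvLayers, numFcLayers, kernelSize, learnRate,
                         dropRate):
    total = 0
    for numConvL in numConvLayers:
        for numFcL in numFcLayers:
            total += (len(kernelSize) * len(learnRate)
                      * (2**numConvL) * (2**numFcL) * len(dropRate))
    return total


# Example: two conv-layer options, one FC-layer option, two kernel sizes,
# two learning rates and two dropout rates:
# count_global_configs([1, 2], [1], [3, 5], [1e-3, 1e-4], [0.3, 0.5])
# -> (2*2*2*2*2) + (2*2*4*2*2) = 32 + 64 = 96 trained models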
Example #6
0
def predict(args):
    if (args.model_type == "et"):
        model = ExtraTreesModel()
    elif (args.model_type == "mlp"):
        model = MlpModel()
    elif (args.model_type == "cnn"):
        model = CnnModel()

    model.load_model(args.model_path)

    # TODO continue implementation
    raise NotImplementedError
def parameter_search_GAP(paddVal, numConvLayers, numFilters, kernelSize,
                         poolSize, batch, epochs, learnRate, classificType,
                         aaSize, modelNum):
    for numConvL in numConvLayers:
        for kernel in kernelSize:
            for learn in learnRate:
                # for classif in classificType:
                # For 1 conv layer this evaluates 2 different filter counts,
                # for 2 layers 4 different combinations, and for 3 layers
                # 8 different filter combinations.
                for i in range(0, 2**numConvL):
                    filters = random.sample(numFilters, k=numConvL)
                    for pool in poolSize:
                        path = "models/savedModels/model_" + str(modelNum) + ".h5"
                        modelName = (str(modelNum) + '_GAP' + "_Conv:" + str(filters)
                                     + '-' + str(kernel) + "x" + str(kernel)
                                     + '_Pool:' + str(pool)
                                     + "_Learn:" + str(learn))
                        modelNum += 1
                        print(modelName)
                        model = CnnModel(paddVal, numConvL, filters, kernel,
                                         pool, [], [], batch, epochs, [],
                                         learn, 'FC', path, aaSize, 'local',
                                         'no', 'gap')
                        save_results([modelName] + model.results)
def parameter_search_LOCAL(paddVal, activFun, numConvLayers, numFilters,
                           kernelSize, poolSize, numFcLayers, neurons, batch,
                           epochs, dropRate, learnRate, classificType, aaSize,
                           modelNum, archNum):
    for numConvL in numConvLayers:
        for numFcL in numFcLayers:
            for kernel in kernelSize:
                for learn in learnRate:
                    # for classif in classificType:
                    # For 1 conv layer this evaluates 2 different filter counts,
                    # for 2 layers 4 different combinations, and for 3 layers
                    # 8 different filter combinations.
                    for i in range(0, 2**numConvL):
                        filters = random.sample(numFilters, k=numConvL)
                        for j in range(0, 2**numFcL):
                            neur = random.sample(neurons, k=numFcL)
                            for pool in poolSize:
                                for drop in dropRate:
                                    for l in range(1, 5, 3):
                                        # To test different datasets
                                        # l=1 - dataset is the negatives
                                        # l=2 - dataset is the benchmark (suppAB)
                                        # l=3 - dataset is the benchmark (suppCD)
                                        # l=4 - dataset is the benchmark (suppE)
                                        print('\nDATA: ' + str(l))

                                        # Evaluate both optimizers (1 - adam, 3 - rmsProp)
                                        # with both activation functions (relu and swish).
                                        for opt in (1, 3):
                                            print('\nOPT:' + str(opt))
                                            for activ in (activFun[0], activFun[1]):
                                                path = "models/savedModels/model_" + str(modelNum) + ".h5"
                                                modelName = (str(modelNum) + '_LOCAL'
                                                             + "_Conv:" + str(filters)
                                                             + '-' + str(kernel) + "x" + str(kernel)
                                                             + '_Pool:' + str(pool)
                                                             + "_FC:" + str(neur)
                                                             + "_ActivFun:" + str(activ)
                                                             + "_Drop:" + str(drop)
                                                             + "_Learn:" + str(learn)
                                                             + "_Opt:" + str(opt)
                                                             + "_DataUsed:" + str(l))
                                                print(modelName)
                                                model = CnnModel(paddVal[l - 1], activ,
                                                                 numConvL, filters, kernel,
                                                                 pool, numFcL, neur, batch,
                                                                 epochs, drop, learn, path,
                                                                 aaSize, archNum, 1, opt, l)
                                                save_results([modelName] + model.results)
                                                modelNum += 1
    # print(modelName)
    # model=CnnModel(paddVal[dataToLoad-1],actFun[0],len(filters),filters,kernelSize[0],poolSize[0],len(neur),neur,batch,epochs,dropRate[0],learnRate[0],path,dictionarySize,archNum,1,opt,dataToLoad,concatOrMulti)
    # save_results([modelName]+model.results)
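    # NOTE: the statements below reference actFun, dataToLoad, concatOrMulti
    # and dictionarySize, which are not parameters of this function and are
    # never defined above, so this trailing block will raise a NameError if reached.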

    modelNum = modelNum + 1
    path = "models/savedModels/model_" + str(modelNum) + ".h5"
    modelName = (str(modelNum) + '_LOCAL' + "_Conv:" + str(filters)
                 + '-' + str(kernelSize[0]) + "x" + str(kernelSize[0])
                 + '_Pool:' + str(poolSize[0])
                 + "_FC:" + str(neur)
                 + "_ActivFun:" + str(actFun[1])
                 + "_Drop:" + str(dropRate)
                 + "_Learn:" + str(learnRate)
                 + "_Opt:" + str(3)
                 + "_DataUsed:" + str(dataToLoad) + '_' + str(concatOrMulti))
    print(modelName)
    model = CnnModel(paddVal[dataToLoad - 1], actFun[0],
                     len(filters), filters, kernelSize[0], poolSize[0],
                     len(neur), neur, batch, epochs, dropRate[0], learnRate[0],
                     path, dictionarySize, archNum, 1, 3, dataToLoad,
                     concatOrMulti)
    save_results([modelName] + model.results)

    modelNum = modelNum + 1
    path = "models/savedModels/model_" + str(modelNum) + ".h5"
    modelName = (str(modelNum) + '_ConvLstm' + "_Conv:" + str(filters)
                 + '-' + str(kernelSize[0]) + "x" + str(kernelSize[0])
                 + '_Pool:' + str(poolSize[0]) + '_LSTM:80'
                 + "_ActivFun:" + str(actFun[0])
                 + "_Learn:" + str(learnRate[0])
                 + "_Opt:" + str(opt)
                 + "_DataUsed:" + str(dataToLoad) + '_' + str(concatOrMulti))
    print(modelName)
    model = ConvLstm(paddVal[dataToLoad - 1], filters, kernelSize[0],