Code Example #1
File: model_inference.py Project: HuangJyunKai/Yee
def fun_load_Seg_model(filepath_model_seg, flag_seg_type):
    if not os.path.isfile(filepath_model_seg):
        print('Pre-trained model (Lane Line Segmentation) file does not exist!')
        exit(-1)
    if flag_seg_type == 'LaneLine':
        model_seg = Net.ESPNet(classes=12, p=2, q=3)
    elif flag_seg_type == 'Road':
        model_seg = Net.ESPNet_corner_heatmap(classes=3, p=2, q=3)
    else:
        raise ValueError('Unknown flag_seg_type: ' + str(flag_seg_type))
    model_seg_dict = model_seg.state_dict()
    # Load the checkpoint and keep only the weights whose names match the model.
    model_reference = torch.load(filepath_model_seg, map_location=device)
    pretrained_dict = {
        k: v
        for k, v in model_reference.items() if k in model_seg_dict
    }
    model_seg_dict.update(pretrained_dict)
    model_seg.load_state_dict(model_seg_dict)

    model_seg = model_seg.to(device)
    model_seg.eval()
    print('load model (Lane Line Segmentation) : successful')
    return model_seg
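
The dictionary comprehension above is the standard PyTorch idiom for partially loading a checkpoint: only weights whose names exist in the target model are copied, so extra keys in the file are ignored. A self-contained sketch of the idiom (the model and path are invented; a shape check is added as an extra safeguard):

import torch
import torch.nn as nn

# Stand-in model; any nn.Module works the same way.
model = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 2))

checkpoint = torch.load('checkpoint.pth', map_location='cpu')  # path is hypothetical

model_dict = model.state_dict()
# Keep only entries the model knows and (extra safeguard) whose shapes match.
compatible = {k: v for k, v in checkpoint.items()
              if k in model_dict and v.shape == model_dict[k].shape}
model_dict.update(compatible)
model.load_state_dict(model_dict)
model.eval()  # inference mode, as in the example above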
Code Example #2
    def test_load_data(self):
        """unit testing of load_data method"""

        model = Models()

        model.load_data()

        actual = len(rows)

        expected = 100
        rows.clear()
        self.assertEqual(actual, expected)
Code Example #3
    def test_get_songs_details_via_title(self):
        """Unit test of get_songs_details_via_titile method"""

        model = Models()

        model.load_data()

        actual = len(model.get_songs_details_via_title("3AM"))

        rows.clear()

        expected = 1

        self.assertEqual(actual, expected)
Code Example #4
    def test_provide_rating(self):
        """Unit tetse of provide_rating method"""

        model = Models()

        model.load_data()

        actual = model.provide_rating('shamsalam', 5)

        expected = {"message": "No song list found with provided id"}

        rows.clear()

        self.assertEqual(actual, expected)
Code Example #5
    def test_get_songs(self):
        """Unit test of get_songs method"""

        model = Models()

        model.load_data()

        fetched_record = model.get_songs(1, 1)

        actual = fetched_record[0]['id']

        expected = '5vYA1mW9g2Coh1HUFUSmlb'

        rows.clear()

        self.assertEqual(actual, expected)
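
Examples #2 through #5 all rely on a module-level rows list that Models.load_data() populates and each test clears afterwards. A minimal sketch of that implied contract (the actual src.models module may differ; the CSV source and field names are assumptions):

# Hypothetical reconstruction of the module under test.
import csv

rows = []  # module-level cache shared by Models and the tests

class Models:
    def load_data(self, path='songs.csv'):  # path is an assumption
        with open(path, newline='') as f:
            rows.extend(csv.DictReader(f))

    def get_songs_details_via_title(self, title):
        return [row for row in rows if row.get('title') == title]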
Code Example #6
    def readInputsOutputs(self, model_name):
        logger.debug("Reading inputs and outputs from optimization model " +
                     str(model_name))
        models = Models()
        data = models.list(model_name, self.connection)
        data = data.decode("utf-8")
        data_set = [x for x in data.split("\n") if "Set" in x]
        data_param_1 = []
        for item in data_set:
            # Strip the right-hand side, the "model." prefix and trailing whitespace.
            item = re.sub(r"=(.*)", "", item)
            item = re.sub(r"model\.", "", item)
            item = re.sub(r"\s+\Z", "", item)
            if "#" not in item and item != "T":
                data_param_1.append(item)
        data_param = [x for x in data.split("\n") if "Param" in x]
        for item in data_param:
            item = re.sub(r"=(.*)", "", item)
            item = re.sub(r"model\.", "", item)
            item = re.sub(r"\s+\Z", "", item)
            if "#" not in item and item != "dT":
                data_param_1.append(item)
        data_var = [x for x in data.split("\n") if "Var" in x]
        data_var_1 = []
        for item in data_var:
            item = re.sub(r"=(.*)", "", item)
            item = re.sub(r"model\.", "", item)
            item = re.sub(r"\s+\Z", "", item)
            if "#" not in item:
                data_var_1.append(item)

        start = [
            "control_frequency", "horizon_in_steps", "dT_in_seconds",
            "repetition", "optimization_type", "solver"
        ]
        data_to_return = {}
        data_to_return["inputs"] = data_param_1
        data_to_return["outputs"] = data_var_1
        data_to_return["start"] = start
        return data_to_return
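
readInputsOutputs() extracts Set/Param/Var names from a Pyomo-style model source with three regex substitutions. A standalone illustration of the same pipeline on invented input:

import re

data = "model.Feasible = Set()\nmodel.Price = Param(model.T)\nmodel.P_PV = Var(model.T)"
names = []
for item in data.split("\n"):
    item = re.sub(r"=(.*)", "", item)    # drop the right-hand side
    item = re.sub(r"model\.", "", item)  # drop the "model." prefix
    item = re.sub(r"\s+\Z", "", item)    # drop trailing whitespace
    names.append(item)
print(names)  # ['Feasible', 'Price', 'P_PV']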
Code Example #7
File: model_trainer.py Project: kevinlim186/GLSearch

####################### SET MODEL TRAINING #############################
sampleSize = '0'  # choose between 0 and 2: 0 corresponds to 50D, 1 to 100D, 2 to 200D (3 to 5 are also available, but for generation-based scaling)
loss = 'WCategoricalCrossentropy'  # choose between "WCategoricalCrossentropy" and "categorical_crossentropy"; WCategoricalCrossentropy is the expected loss described in the thesis



###################### THESE PARAMETERS ARE FIXED #########################
esconfig = [0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1]

timeSteps = 2  # the number of time steps for the LSTM network
precision_value = 'ert-2'  # precision value set to negative 2

result = Result(ert_column=precision_value)

#load the performance and ELA files generated from data gathering
performance = pd.read_csv("./perf/DataGathering_performance.csv")
ela = pd.read_csv("./perf/DataGathering_elaFeatures.csv")

result.addPerformance(performance)
result.addELA(ela)

# This can fail if a training sample does not contain at least two time steps,
# which happens when CMA-ES finds the optimal value before the 2nd checkpoint.
Xtrain, Ytrain = result.createTrainSet(dataset=sampleSize, algorithm=None, reset=False, interface=None, RNN=timeSteps)


model = Models(Xtrain, Ytrain, _shuffle=False)
model.trainLSTM(stepSize=timeSteps, size=sampleSize, loss=loss, precision_value=precision_value)
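
As the comment above warns, createTrainSet() can fail when CMA-ES converges before the second checkpoint. One defensive wrapper, sketched under the assumption that the failure surfaces as an ordinary exception:

# Hedged guard around the call above; the exact exception type is an assumption.
try:
    Xtrain, Ytrain = result.createTrainSet(dataset=sampleSize, algorithm=None,
                                           reset=False, interface=None, RNN=timeSteps)
except Exception as err:
    raise SystemExit(f"training sample needs at least {timeSteps} time steps: {err}")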
Code Example #8
"""run.py file """
from src import app
from src.models import Models

mod = Models()

mod.load_data()

if __name__ == '__main__':
    app.run(debug=True)
Code Example #9
        final_test_features = input_file(base_feature_path + "/" +
                                         "test_features.p")

    t1 = time.time()

    print("Time for feature extraction is:", t1 - t0)

    # Target variables
    train_target_labels = target_labels(train_stances)
    validation_target_labels = target_labels(validation_stances)
    test_target_labels = target_labels(test.headlineInstances)

    # Modelling the features
    print("Start of Modelling")
    models = Models(final_train_features, final_validation_features,
                    final_test_features, train_target_labels,
                    validation_target_labels, test_target_labels)

    # Calling the 4 models
    models.get_lr()
    models.get_dt()
    models.get_nb()
    models.get_rf()
    # Used read_from_csv in utils to get the actual and predicted labels
    # and produce the correctness visualization graphs for the report.

    t2 = time.time()
    print("Time for the total is:", t2 - t0)
Code Example #10
        api_category command_name required args
        e.g.:
        --input_add file_path      - for POST
        --input_add file_path id   - for PUT
    """

    command_to_execute = parser()

    http = Http_ofw(command_to_execute)
    for key, value in command_to_execute["model"].items():
        if value is not None:
            logger.debug("Executing the command model")
            model = Models()
            model.execute(http, command_to_execute)

    for key, value in command_to_execute["data_source"].items():
        if value is not None:
            logger.debug("Executing the command input")
            data_source = Data_source()
            data_source.execute(http, command_to_execute)

    for key, value in command_to_execute["data_output"].items():
        if value is not None:
            logger.debug("Executing the command output")
            data_output = Data_output()
            data_output.execute(http, command_to_execute)
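
The docstring fragment at the top of this example documents the calling convention: --input_add takes a file path for POST and a file path plus an id for PUT. A minimal argparse sketch of that convention (only the flag name comes from the docstring; everything else is assumed):

import argparse

# Sketch of the documented convention: one positional file path,
# plus an optional id that switches the request from POST to PUT.
parser = argparse.ArgumentParser()
parser.add_argument('--input_add', nargs='+')
args = parser.parse_args(['--input_add', 'config.json', '42'])
method = 'PUT' if len(args.input_add) > 1 else 'POST'
print(method, args.input_add[0])  # PUT config.json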
Code Example #11
if __name__ == '__main__':
    print('load Stock chart data')
    base_file_path = sys.argv[1]
    stockCharts = StockProcessor(base_file_path)
    print('Input stock price interval in minutes - 5, 15, 30, 60, 240, 1440')
    time_interval = input()
    amazon_stock_prices, apple_stock_prices = stockCharts.loadDataForInterval(time_interval)
    # For training only:
    # document_vectors_amazon, document_vectors_apple = NewsProcessor(base_file_path).loadNewsArticles()
    # # AMAZON
    # classify = Classifier(base_file_path, 'amazon', time_interval, amazon_stock_prices, document_vectors_amazon)
    # classify.label_documents()
    # # APPLE
    # classify = Classifier(base_file_path, 'apple', time_interval, apple_stock_prices, document_vectors_apple)
    # classify.label_documents()
    amazon_model = Models(base_file_path, 'amazon', amazon_stock_prices, time_interval)
    amazon_model.naive_bayes_classifier()
    amazon_model.SVM_classifier()
    amazon_model.DT_classifier()
    amazon_model.SVM_poly_classifier()
    amazon_model.Logistic_Regression11_classifier()
    amazon_model.Logistic_Regression12_classifier()
    amazon_model.KNN_classifier()
    amazon_model.SGDC_classifier()
    amazon_model.accounting_factor()

    apple_model = Models(base_file_path, 'apple', apple_stock_prices, time_interval)
    apple_model.naive_bayes_classifier()
    apple_model.SVM_classifier()
    apple_model.DT_classifier()
    apple_model.SVM_poly_classifier()
Code Example #12
def train(device, cfg):
    dataset = Datasets(cfg.dataset)
    model = Models(cfg.model)
    return 0
Code Example #13
    def setUp(self):
        """setup method"""
        self.mod = Models()
Code Example #14
ela = pd.read_csv("./perf/DataGathering_elaFeatures.csv")

result.addPerformance(performance)
result.addELA(ela)

# This can fail if a training sample does not contain at least two time steps,
# which happens when CMA-ES finds the optimal value before the 2nd checkpoint.
Xtrain, Ytrain = result.createTrainSet(dataset=sampleSize,
                                       algorithm=None,
                                       reset=False,
                                       interface=None,
                                       RNN=timeSteps)

####################### Model Training #############################
loss = 'WCategoricalCrossentropy'  # choose between "WCategoricalCrossentropy" and "categorical_crossentropy"; WCategoricalCrossentropy is the expected loss described in the thesis

model = Models(Xtrain, Ytrain, _shuffle=False)
model.trainLSTM(stepSize=timeSteps, size=sampleSize, loss=loss)

####################### Model Testing #############################

# Depending on the configuration used for the LSTM, the model name can change.
modelName = '_RNN_Hidden2_Dropout_0.2_Grossup_1_StepSize2_Epoch2000_Learning1e-05_Size:2_Loss_WCategoricalCrossentropy'
sampleSizeValue = 200


# The custom loss function must be redefined so it can be passed at load time.
def weightedCategoricalCrossentropy(y_true, y_pred):
    return K.mean(K.sum(y_true * y_pred, axis=1))


model = tf.keras.models.load_model(
    './models/' + modelName,
    # the custom loss must be supplied at load time; continuation of the call assumed
    custom_objects={'weightedCategoricalCrossentropy': weightedCategoricalCrossentropy})
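
Passing the loss back via custom_objects is the standard Keras mechanism for resolving custom functions at deserialization time. A minimal round trip demonstrating why it is needed (the stand-in model and file name are invented):

import tensorflow as tf
from tensorflow.keras import backend as K

# Same custom loss as above; it must be in scope at load time.
def weightedCategoricalCrossentropy(y_true, y_pred):
    return K.mean(K.sum(y_true * y_pred, axis=1))

# Tiny stand-in model; the real network is the LSTM trained earlier.
demo = tf.keras.Sequential([tf.keras.layers.Dense(3, activation='softmax', input_shape=(4,))])
demo.compile(optimizer='adam', loss=weightedCategoricalCrossentropy)
demo.save('demo_custom_loss.keras')

# Without custom_objects the load fails with an unknown-loss error.
restored = tf.keras.models.load_model(
    'demo_custom_loss.keras',
    custom_objects={'weightedCategoricalCrossentropy': weightedCategoricalCrossentropy})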
Code Example #15
"""contains all endpoint"""

from flask import jsonify, request
from src import app
from src.models import Models

mod = Models()


# provide page_no and row_size via query string
# example http://127.0.0.1:5000/record?page_no=2&row_size=3
@app.route('/record', methods=['GET'])
def get_songs_via_pagination():
    """Get songs via page no and page size(implementation of server side pagination)"""

    page_no = request.args.get('page_no')

    row_size = request.args.get('row_size')

    songs_details = mod.get_songs(int(page_no), int(row_size))

    return jsonify(songs_details)
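
Matching example #5's expectation that get_songs(1, 1) returns the first record, the pagination presumably slices the cached rows. A sketch of that method (slicing logic assumed):

def get_songs(self, page_no, row_size):
    # Hypothetical: 1-based page number over the module-level rows cache.
    start = (page_no - 1) * row_size
    return rows[start:start + row_size]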


# provide title of song via query string
# example http://127.0.0.1:5000/songs?title=3AM
@app.route('/songs', methods=['GET'])
def get_songs_via_title():
    """Retrieve song via title"""

    title = request.args.get('title')