Example 1
def Generate(matrix_len, alpha_len):
    # Draw random integer matrices until ModMatInv finds one that is
    # invertible mod alpha_len (it returns an all-zeros matrix on failure).
    key = np_round((alpha_len - 1) * random.rand(matrix_len, matrix_len))
    inverted_key = ModMatInv(key, alpha_len, matrix_len)
    while array_equal(inverted_key, zeros((matrix_len, matrix_len))):
        key = np_round((alpha_len - 1) * random.rand(matrix_len, matrix_len))
        inverted_key = ModMatInv(key, alpha_len, matrix_len)
    return key, inverted_key
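
A minimal usage sketch, assuming ModMatInv is defined elsewhere and behaves as the loop above implies (an all-zeros matrix when no inverse mod alpha_len exists), with the numpy names imported under the snippet's aliases:

# Hypothetical usage: a 3x3 key over a 26-letter alphabet.
from numpy import round as np_round, array_equal, zeros, random

key, inverted_key = Generate(3, 26)  # entries drawn from 0..25
print(key)
print(inverted_key)                  # inverse of key mod 26, via ModMatInv
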
def predict_image(path_of_image, groupStage):
    path_of_model = os_path.join("./CUSTOMIZE_4_USER/MODEL_TRAINING",
                                 groupStage, groupStage + ".pth")
    path_of_feature = os_path.join("./CUSTOMIZE_4_USER/MODEL_TRAINING",
                                   groupStage, groupStage + ".npz")

    start_time = time()
    model = NeuralNet(input_size, hidden_size, num_classes).to(device)
    model.load_state_dict(load(path_of_model))

    data = np_load(path_of_feature)
    [h_max, s_max, v_max] = data['data_max']
    [h_min, s_min, v_min] = data['data_min']

    img = imread(path_of_image)
    img = resize(img, (6000, 4000))
    img = img[500:-500, 750:-750, :]
    img = cvtColor(img, COLOR_BGR2HSV)
    hchan, schan, vchan = split(img)
    h_hist = calcHist([img], [0], None, [256], [0, 256]).reshape(256, )
    s_hist = calcHist([img], [1], None, [256], [0, 256]).reshape(256, )
    v_hist = calcHist([img], [2], None, [256], [0, 256]).reshape(256, )

    hMean = np_mean(hchan) / 255
    DPV_h_max = np_sum(np_absolute(h_hist - h_max)) / (HEIGHT * WIDTH)
    DPV_h_min = np_sum(np_absolute(h_hist - h_min)) / (HEIGHT * WIDTH)

    sMean = np_mean(schan) / 255
    DPV_s_max = np_sum(np_absolute(s_hist - s_max)) / (HEIGHT * WIDTH)
    DPV_s_min = np_sum(np_absolute(s_hist - s_min)) / (HEIGHT * WIDTH)

    vMean = np_mean(vchan) / 255
    DPV_v_max = np_sum(np_absolute(v_hist - v_max)) / (HEIGHT * WIDTH)
    DPV_v_min = np_sum(np_absolute(v_hist - v_min)) / (HEIGHT * WIDTH)

    correlation = np_corrcoef(h_hist, s_hist)[0][1]

    # 9-feature variant kept for reference; the model below uses 7 features,
    # so the V-channel stats above are computed but not fed to the network:
    #image_feature = np_array((hMean, DPV_h_max, DPV_h_min, sMean, DPV_s_max, DPV_s_min, vMean, DPV_v_max, DPV_v_min))
    image_feature = np_array((hMean, DPV_h_max, DPV_h_min, sMean, DPV_s_max,
                              DPV_s_min, correlation))
    image_feature = from_numpy(image_feature).to(device).float().view(
        1, input_size)

    with no_grad():
        out_predict = model(image_feature)
        _, predicted_result = torch_max(out_predict.data, 1)
        original = Tensor([[1, 33, 66, 99]])

    # Round xx.xx %
    percentage_result = np_round(
        mm(out_predict.view(1, num_classes), original.view(num_classes,
                                                           1)).item(), 2)

    # Processed time
    processedTime = np_round(time() - start_time, 2)
    #print("Time  ",processedTime)

    return percentage_result, processedTime
Example 3
def integration_to_seconds(integration, filetype):
    """Return a 1D numpy array of seconds from the beginning of the dataset,
       using the integration time of each dataset sample.

    integration: numpy 1D array of data integration values
    filetype: "fits" for millisecond values; anything else is treated as
              raw clock counts (200 MHz clock, 1024 ticks per count)
    """
    if filetype == "fits":
        return np_round(integration / 1000, 3)
    else:
        clock = 1.0 / 200000000  # 200 MHz sample clock -> 5 ns period
        return np_round(integration * clock * 1024, 3)
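
A quick worked check of both branches (values hypothetical; the raw branch follows the 200 MHz clock and 1024-tick factor in the code):

from numpy import array, round as np_round

print(integration_to_seconds(array([2500]), "fits"))   # 2500 ms -> [2.5]
# raw counts: 200000 * (1 / 200e6 s) * 1024 = 1.024 s
print(integration_to_seconds(array([200000]), "raw"))  # -> [1.024]
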
Example 4
def concatenate_all_pickled_wall_normal_cases(folder, output_pickle):
    from os import listdir
    from os.path import join, split
    import pandas as pd
    from re import findall
    from numpy import round as np_round

    pickled_files = [f for f in listdir(folder)
                     if f.endswith('.p')
                     and not f == split(output_pickle)[1]]

    all_dfs = []

    for p in pickled_files:
        df         = pd.read_pickle(join(folder, p))
        df['file'] = p

        case_name = findall('[A-Za-z0-9_]+', p)[0]

        df['case_name']    = case_name
        df['near_x']       = np_round(df.x)
        df['delta_99']     = 0
        df['near_y_delta'] = 0
        df['near_y']       = 0

        print("   For the case {0}".format(case_name))
        print("     found the following x locations")
        print("     {0}".format(df.near_x.unique()))

        for x in df.near_x.unique():
            bl = get_bl_parameters(case_name, x)
            mask = df.near_x == x
            # Assign via df.loc[mask, col]; the original's
            # df[col].loc[mask] = ... is chained assignment and may not
            # write back to df.
            df.loc[mask, 'delta_99'] = bl.delta_99.values[0]
            df.loc[mask, 'near_y'] = np_round(df.loc[mask].y, 1)
            df.loc[mask, 'near_y_delta'] = [
                find_nearest(y_d, expected_wall_normal_locations)
                for y_d in df.loc[mask].y / bl.delta_99.values[0]
            ]

        all_dfs.append(df)

    # DataFrame.append was removed from pandas; concatenate once at the end.
    concatenated_df = pd.concat(all_dfs, ignore_index=True)
    concatenated_df.to_pickle(output_pickle)
Example 5
def _convert_8bits(img):
    """
    Convert the result to a uint8 array with values
    clamped to the 0-255 range.
    """
    res_8b = np_round(img)  # the original referenced an undefined `resultat`
    res_8b[res_8b > 255] = 255
    return res_8b.astype(np_uint8)
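
A small sanity check (input hypothetical). Note that only the upper bound is clamped; a negative input would wrap around when cast to uint8:

from numpy import array, round as np_round, uint8 as np_uint8

arr = array([0.4, 12.7, 254.5, 300.0])
print(_convert_8bits(arr))  # -> [  0  13 254 255]; 300 is clamped to 255
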
Example 6
def check_quotients_near_double(A,
                                B,
                                C,
                                D,
                                *,
                                num_calls_triple=0,
                                num_calls_double=0):
    """
    Get the quotients for the case when the roots are almost a double.
    """
    qs = get_quotients_near_double(A, B, C, D)
    pos_case = check_pairwise(qs, isclose)
    neg_case = check_pairwise([qs[0], -qs[1]], isclose)
    if not (pos_case or neg_case):
        return False, None

    if pos_case:
        λ = get_shared_figs(qs)
    elif neg_case:
        λ = get_shared_figs(array([qs[0], -qs[1]]))
    else:
        raise RuntimeError("double quotients check has problem")

    a = A
    b = -B / 3
    c = C / 3
    d = -D

    b_dash = b - λ * a

    c_dash = c - λ * b
    c_ddash = c_dash - λ * b_dash

    if d < a * np_pow(λ, 3):
        D = np_round(d)
        δ = d - D
        d_dash = D - λ * c
        d_ddash = d_dash - λ * c_dash
        d_star = (d_ddash - λ * c_ddash) + δ
    else:
        d_dash = d - λ * c
        d_ddash = d_dash - λ * c_dash
        d_star = d_ddash - λ * c_ddash

    case, roots = solve_cubic(
        a,
        -3 * b_dash,
        3 * c_ddash,
        -d_star,
        num_calls_triple=num_calls_triple,
        num_calls_double=num_calls_double + 1,
        recurse=(num_calls_double < 10),
    )

    return True, (case, roots + λ)
Example 7
    def linspace(self):
        if type(self.ran) is tuple:
            try:
                logging.debug(f"using ran: {self.ran}, type {type(self.ran[0])} for linspace")
                self._linspace = np_round(
                    np_linspace(self.ran[0],
                                self.ran[-1],
                                self.n), decimals=3)
                logging.debug(f"running linspace for {self.name}, if linspace: {type(self._linspace)}")
            except Exception as exc:
                # chain the original error instead of a bare `except:`,
                # which would also swallow KeyboardInterrupt
                raise ValueError("Could not assign linspace") from exc
            return self._linspace
        else:
            self._linspace = np_array(self.val)

        return self._linspace
Example 8
def _disc(a, b, c):
    """
    Python implementation of DISC: evaluates b**2 - a*c while reducing the
    catastrophic cancellation the naive formula suffers when b**2 ≈ a*c.
    """
    if a * c > 0:
        a = np_abs(a)
        c = np_abs(c)
        loop_cont = True  # loop must run at least once
        while loop_cont:
            loop_cont = False
            if a < c:
                a, c = c, a
            n = np_round(b / c)
            if n != 0:
                α = a - n * b
                if α >= -a:
                    b = b - n * c
                    a = α - n * b
                    if a > 0:
                        loop_cont = True
    return np_pow(b, 2) - a * c
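
A comparison against the naive formula, assuming the aliases map to numpy.abs, numpy.round, and numpy.power. When b**2 and a*c nearly cancel, the naive difference loses everything to rounding while the reduction loop recovers the exact value:

from numpy import abs as np_abs, round as np_round, power as np_pow

a, b, c = 1.0 + 2**-26, 1.0 + 2**-27, 1.0  # true b*b - a*c is 2**-54
print(b * b - a * c)   # 0.0 -- the 2**-54 term is lost to rounding
print(_disc(a, b, c))  # 5.551115123125783e-17, i.e. exactly 2**-54
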
Example 9
    print("File name: " + data_file)

    for dataset in datasets:
        print("Table: " + to_text_string(dataset.th))
        gain = None
        shift = None
        integration = None
        print(
            "Time from epoch \t UT time \t\t\t\t gain \t integration \t shift")
        for row in dataset.th.iterrows():
            if (row['gain'] != gain) or (row['integration'] != integration
                                         ) or (row['shift'] != shift):
                #                print (str(row['time'])+" "+str(row['subtime'])+" "+
                ut_time = time.gmtime(row['time'])
                ut_msec = str(np_round(row['subtime'] * CLOCK, 3)).split('.')
                print(
                    str(np_round(row['time'] + row['subtime'] * CLOCK, 3)) +
                    " \t\t " + str(ut_time.tm_year) + " " +
                    str(ut_time.tm_mon) + " " + str(ut_time.tm_mday) + " " +
                    str(ut_time.tm_hour) + ":" + str(ut_time.tm_min) + ":" +
                    str(ut_time.tm_sec) + "." + ut_msec[1] + "\t " +
                    str(row['gain']) + " \t " + str(row['integration']) +
                    " \t\t " + str(row['shift']))

                gain = row['gain']
                integration = row['integration']
                shift = row['shift']
        print("\n\r")

    print("\n\r")
Example 10
 def format_output(self, value):
     return np_round(value, self.decimal_places)
Example 11
 def predict(self, input_data):
     raw_output = self.raw_predict(input_data)
     return np_round(raw_output)
Example 12
def survey():
    """Survey home page."""
    N_SIMULATION_PERIODS = get_n_periods()
    db = get_db()
    user_data = db.execute(
        "SELECT * FROM user WHERE id = ?",
        (session["user_id"],)
    ).fetchone()
    user_stage = user_data['current_stage']
    simulation_period = user_data['simulation_period']
    user_treatment_level = user_data['treatment_level']
    display_dict = {}

    if user_stage == 'simulation':
        fig_url = \
            blog_functions.get_fig_url(user_treatment_level, simulation_period)

        experiment_data, rec_param_demand_data = \
            blog_functions.get_experiment_data(
                db, simulation_period, user_treatment_level)

        rec_param_demand_data_cols = ['Q_rec', 'v', 'p']
        display_dict.update({x: int(rec_param_demand_data[x].tolist()[0])
                             for x in rec_param_demand_data_cols})
        show_recs = True

        calc_decision_suffixes = ['_Q']
        calc_decision_list = [
            x + y for x in ['calc', 'decision'] for y in calc_decision_suffixes
        ]
        display_dict.update(
            {x: 0 for x in calc_decision_list})
        display_dict.update(
            {'calc_errors': [],
            'calc_n_errors': 0,
            'decision_errors': [],
            'decision_n_errors': 0,
            'expected_profit': 0}
        )

        # need an empty dataframe before history is made
        temp_display_df_cols = ['Period',  
                    'Ordered From Supplier', 
                    'Demand',
                    'Profit ($)']

        if 2 <= simulation_period <= N_SIMULATION_PERIODS:
            # get the relevant historical data and display it as a table
            temp_exp_df = experiment_data.loc[
                (experiment_data['ID'] == user_treatment_level)
                & (experiment_data['Period'] < simulation_period)][
                ['Period', 'Demand']]

            # now get the full contracts table for this user
            temp_user_contracts_df = read_sql_query(
                "SELECT * FROM contracts WHERE user_id = ?",
                con=db, params=(session["user_id"],)
            )

            temp_display_df = temp_exp_df.merge(
                temp_user_contracts_df, 
                how='left', 
                left_on='Period', right_on='simulation_period')

            temp_display_df = blog_functions.get_contract_metrics(
                temp_display_df, 
                display_dict['v'], 
                display_dict['p'], 
                'Demand', 
                'q' 
            ) 

            temp_display_df.rename(
                {'q': 'Ordered From Supplier',
                'sales': 'Sales (Units)',
                'lost_sales': 'Lost Sales (Units)',
                'profit': 'Profit ($)'}, axis=1, inplace=True)
                
            # select only the columns defined in temp_display_df_cols above
            temp_display_df = temp_display_df[temp_display_df_cols]
        else:
            temp_display_df = DataFrame(columns=temp_display_df_cols)

    if request.method == 'GET':
        if user_stage == 'simulation':
            if request.args.get('action') == 'Calculate': 
                validate = blog_functions.validate_input()
                error_list = blog_functions.do_validate_instructions(
                    validate, display_dict, request, 'calc_Q', 'calc'
                )
                    
                if len(error_list) == 0:
                    expected_profit = blog_functions.get_expected_profit(
                        int(display_dict['v']), 
                        int(display_dict['p']),
                        int(request.args.get('calc_Q'))
                    ) 

                    display_dict.update({
                        'expected_profit': np_round(expected_profit, 2)
                        })

                    update_calculator_count = user_data['calculator_count'] + 1

                    db.execute("UPDATE user"
                        " SET calculator_count = ?"
                        " WHERE id = ?;",
                        (update_calculator_count,
                        session["user_id"]))
                    db.commit()
                
                return render_template("blog/" + user_stage + ".html",
                    display_dict=display_dict,
                    simulation_period=simulation_period,
                    historical_table=temp_display_df.to_html(
                        index=False, 
                        justify='left'),
                    fig_url=fig_url,
                    show_recs=show_recs)
            
            if simulation_period <= N_SIMULATION_PERIODS:
                return render_template("blog/" + user_stage + ".html",
                    display_dict=display_dict,
                    simulation_period=simulation_period,
                    historical_table=temp_display_df.to_html(
                        index=False, 
                        justify='left'),
                    fig_url=fig_url,
                    show_recs=show_recs)
            
            else:
                db.execute("UPDATE user"
                        " SET current_stage = ?"
                        " WHERE id = ?;",
                        (shuttle_dict[user_stage], session["user_id"]))
                db.commit()
        
        if user_stage == 'risk':
            return render_template("blog/" + user_stage + ".html",
                    question_dict=QUESTION_DICT,
                    risk_preference_dict=RISK_PREFERENCE_DICT)

        if user_stage == 'risk_answer':
            given_answer = RISK_PREFERENCE_DICT['RP9'][user_data['RP9']]
            answer_list = ['You chose ' + given_answer + '.']
            answer_list.extend(
                ['The computer chose ' + UNFORTUNATE_RP9[user_data['RP9']][0] \
                    + ' points.'])
            answer_list.extend(['If you would have chosen "' + \
            RISK_PREFERENCE_DICT['RP9'][1 - user_data['RP9']] + 
            '", you would have won ' + \
            UNFORTUNATE_RP9[user_data['RP9']][1] + ' points!'])
            return render_template("blog/" + user_stage + ".html",
                answer_list=answer_list)

        return render_template("blog/" + user_stage + ".html")
    
    if request.method == 'POST':
        if user_stage == 'demographics':
            gender = request.form.get('gender')
            age = request.form.get('age')
            sc = request.form.get('sc')
            procurement = request.form.get('procurement')

            db.execute("UPDATE user"
                        " SET gender = ?, age = ?, sc_exp = ?,"
                        " procurement_exp = ?, current_stage = ?"
                        " WHERE id = ?;",
                        (gender, age, sc, procurement, 
                        shuttle_dict[user_stage], session["user_id"]))
            db.commit()

        if user_stage == 'cognitive':
            db.execute("UPDATE user"
                        " SET CRT1 = ?, CRT2 = ?, CRT3 = ?,"
                        " CRT4 = ?, CRT5 = ?, CRT6 = ?, CRT7 = ?,"
                        " current_stage = ?, enter_simulation = ?"
                        " WHERE id = ?;",
                        (request.form.get("CRT1"), 
                        request.form.get("CRT2"),
                        request.form.get("CRT3"),
                        request.form.get("CRT4"),
                        request.form.get("CRT5"),
                        request.form.get("CRT6"),
                        request.form.get("CRT7"), 
                        shuttle_dict[user_stage], 
                        datetime.now(),
                        session["user_id"]))
            db.commit()

        if user_stage == 'simulation':
            if simulation_period <= N_SIMULATION_PERIODS:
                validate = blog_functions.validate_input()
                error_list = blog_functions.do_validate_instructions(
                    validate, display_dict, request, 'decision_Q', 'decision'
                )

                if len(error_list) == 0:
                    db.execute("INSERT INTO contracts"
                        "(user_id, simulation_period, q, time_stamp,"
                        " calculator_count)"
                        "VALUES (?, ?, ?, ?, ?);",
                        (session["user_id"], 
                        simulation_period, 
                        int(request.form.get('decision_Q')),
                        datetime.now(), 
                        user_data['calculator_count'])
                    )
                    db.commit()

                    update_simulation_period = simulation_period + 1

                    if simulation_period < N_SIMULATION_PERIODS:
                        db.execute("UPDATE user"
                                " SET simulation_period = ?"
                                " WHERE id = ?",
                                (update_simulation_period,
                                session["user_id"]))
                        db.commit()
                    else:
                        # go to the risk survey
                        db.execute("UPDATE user"
                                " SET current_stage = ?"
                                " WHERE id = ?;",
                                (shuttle_dict[user_stage], session["user_id"]))
                        db.commit()
                else:
                    return render_template("blog/" + user_stage + ".html",
                                    display_dict=display_dict,
                                    simulation_period=simulation_period,
                                    historical_table=temp_display_df.to_html(
                                                    index=False, 
                                                    justify='left'),
                                    fig_url=fig_url,
                                    show_recs=show_recs)
        
        if user_stage == 'risk':
            fin_answer_dict = {x: request.form.get(x) 
                for x in QUESTION_DICT.keys()
            }
            risk_answer_dict = {x: request.form.get(x) 
                for x in RISK_PREFERENCE_DICT.keys()
            }
            
            all_updates = [shuttle_dict[user_stage]]
            all_updates.extend([int(fin_answer_dict[x]) 
                for x in fin_answer_dict.keys()])
            all_updates.extend([int(risk_answer_dict[x]) 
                for x in risk_answer_dict.keys()])
            all_updates.extend([session["user_id"]])
            
            db.execute("UPDATE user"
                    " SET current_stage = ?,"
                    " Fin1 = ?, Fin2 = ?, Fin3 = ?, Fin4 = ?, Fin5 = ?, Fin6 = ?,"
                    " RP1 = ?, RP2 = ?, RP3 = ?, RP4 = ?, RP5 = ?, RP6 = ?,"
                    " RP7 = ?, RP8 = ?, RP9 = ?"
                    "WHERE id = ?;",
                    tuple(all_updates)
                    )
            db.commit()

        if user_stage == 'risk_answer':
            answer = request.form.get('RP10')
            db.execute("UPDATE user"
                    " SET current_stage = ?,"
                    " RP10 = ?"
                    "WHERE id = ?;",
                    (shuttle_dict[user_stage], answer, session["user_id"])
                    )
            db.commit()

        if user_stage == 'thankyou':
            feedback = request.form.get('feedback_input')
            db.execute("UPDATE user"
                        " SET feedback = ?, current_stage = ?"
                        " WHERE id = ?;",
                        (feedback, shuttle_dict[user_stage], 
                        session["user_id"]))
            db.commit()
            session.clear()
            return redirect(url_for("blog.survey"))
                
        return redirect(url_for("blog.survey"))
Example 13
def fmilo(array_):
    # Scale array_ by 0.67328 and round it, entirely in place: both ufunc
    # calls write their results back into array_ through the out argument.
    np_round(multiply(array_, 0.67328, array_), out=array_)
def kappa(y_true, y_pred, weights=None, allow_off_by_one=False,
          min_rating=None,
          max_rating=None):
    """
    Calculates the kappa inter-rater agreement between the gold standard
    and the predicted ratings. Potential values range from -1 (representing
    complete disagreement) to 1 (representing complete agreement).  A kappa
    value of 0 is expected if all agreement is due to chance.

    In the course of calculating kappa, all items in `y_true` and `y_pred` will
    first be converted to floats and then rounded to integers.

    It is assumed that y_true and y_pred contain the complete range of possible
    ratings.

    This function contains a combination of code from yorchopolis's kappa-stats
    and Ben Hamner's Metrics projects on Github.

    :param y_true: The true/actual/gold labels for the data.
    :type y_true: array-like of float
    :param y_pred: The predicted/observed labels for the data.
    :type y_pred: array-like of float
    :param weights: Specifies the weight matrix for the calculation.
                    Options are:

                        -  None = unweighted-kappa
                        -  'quadratic' = quadratic-weighted kappa
                        -  'linear' = linear-weighted kappa
                        -  two-dimensional numpy array = a custom matrix of
                           weights. Each weight corresponds to the
                           :math:`w_{ij}` values in the wikipedia description
                           of how to calculate weighted Cohen's kappa.

    :type weights: str or numpy array
    :param allow_off_by_one: If true, ratings that are off by one are counted
                             as equal, and all other differences are reduced
                             by one. For example, 1 and 2 will be considered
                             equal, whereas 1 and 3 will have a difference of
                             1 when building the weights matrix.
    :type allow_off_by_one: bool
    :param min_rating: The minimum rating to consider; inferred from the data
                       when None.
    :type min_rating: int or None
    :param max_rating: The maximum rating to consider; inferred from the data
                       when None.
    :type max_rating: int or None
    """
    from numpy import round as np_round
    from numpy import empty as np_empty
    from numpy import bincount, outer, count_nonzero

    # Ensure that the lists are both the same length
    assert(len(y_true) == len(y_pred))

    y_true = np_round(y_true).astype(int)
    y_pred = np_round(y_pred).astype(int)

    # Figure out normalized expected values
    if min_rating is None:
        min_rating = min(min(y_true), min(y_pred))
    if max_rating is None:
        max_rating = max(max(y_true), max(y_pred))

    # shift the values so that the lowest value is 0
    # (to support scales that include negative values)
    y_true = y_true - min_rating
    y_pred = y_pred - min_rating

    # Build the observed/confusion matrix
    num_ratings = max_rating - min_rating + 1
    # confusion_matrix is used just below, so its import must be live:
    from sklearn.metrics import confusion_matrix
    observed = confusion_matrix(y_true, y_pred,
                                labels=list(range(num_ratings)))
    num_scored_items = float(len(y_true))

    # Build weight array if weren't passed one
    from six import string_types
    if isinstance(weights, string_types):
        wt_scheme = weights
        weights = None
    else:
        wt_scheme = ''
    if weights is None:
        weights = np_empty((num_ratings, num_ratings))
        for i in range(num_ratings):
            for j in range(num_ratings):
                diff = abs(i - j)
                if allow_off_by_one and diff:
                    diff -= 1
                if wt_scheme == 'linear':
                    weights[i, j] = diff
                elif wt_scheme == 'quadratic':
                    weights[i, j] = diff ** 2
                elif not wt_scheme:  # unweighted
                    weights[i, j] = bool(diff)
                else:
                    raise ValueError('Invalid weight scheme specified for '
                                     'kappa: {}'.format(wt_scheme))

    hist_true = bincount(y_true, minlength=num_ratings)
    hist_true = hist_true[: num_ratings] / num_scored_items
    hist_pred = bincount(y_pred, minlength=num_ratings)
    hist_pred = hist_pred[: num_ratings] / num_scored_items
    expected = outer(hist_true, hist_pred)

    # Normalize observed array
    observed = observed / num_scored_items

    # If all weights are zero, that means no disagreements matter.
    k = 1.0
    if count_nonzero(weights):
        k -= (sum(sum(weights * observed)) / sum(sum(weights * expected)))

    return k
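
A short usage sketch with hypothetical ratings; scikit-learn and six must be importable, since kappa pulls in confusion_matrix and string_types internally:

y_true = [1, 2, 3, 4, 4, 2]
y_pred = [1, 2, 3, 3, 4, 3]
print(kappa(y_true, y_pred))               # unweighted kappa
print(kappa(y_true, y_pred, 'linear'))     # linear-weighted kappa
print(kappa(y_true, y_pred, 'quadratic'))  # quadratic-weighted kappa
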
Example 16
def round(x: Tensor, inplace=False) -> Tensor:
    rounded = x if inplace else deepcopy(x)
    rounded.name += ' (rounded)'
    rounded.value = np_round(x.value)

    return rounded
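
A stand-in Tensor to exercise the helper; the project's real Tensor class is not shown, so the fields here are inferred from the attribute access above:

from copy import deepcopy
from dataclasses import dataclass
from numpy import round as np_round, ndarray, array

@dataclass
class Tensor:          # hypothetical stub with the two fields round() touches
    name: str
    value: ndarray

t = Tensor('probs', array([0.2, 0.51, 0.9]))
r = round(t)            # inplace=False, so t itself is left untouched
print(r.name, r.value)  # probs (rounded) [0. 1. 1.]
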
Example 18
def predict_image(path_of_image, groupStage):
    path_of_model = os_path.join("./CUSTOMIZE_4_USER/MODEL_TRAINING", groupStage, groupStage+".pth")
    path_of_feature = os_path.join("./CUSTOMIZE_4_USER/MODEL_TRAINING", groupStage, groupStage+".npz")
    hidden_path = os_path.join("./CUSTOMIZE_4_USER/MODEL_TRAINING", groupStage, groupStage+"_hidden.txt")
    # if the hidden-size file does not exist, return 0
    try:
        with open(hidden_path, "r") as f:
            hidden_size = int(f.readline())
    except FileNotFoundError:
        print("ERROR: No hidden number file in folder")
        return 0.0, 0

    # Calculate processing time
    start_time = time()
    model = NeuralNet(input_size, hidden_size, num_classes).to(DEVICE)
    model.load_state_dict(load(path_of_model))

    data = np_load(path_of_feature)
    [h_max, s_max, v_max] = data['data_max']
    [h_min, s_min, v_min] = data['data_min']
    
    img = imread(path_of_image)
    img = resize(img, (6000,4000))
    img = img[500:-500, 750:-750, :]
    img = cvtColor(img, COLOR_BGR2HSV)
    hchan, schan, vchan = split(img)
    h_hist = calcHist([img], [0], None, [256], [0,256]).reshape(256,)
    s_hist = calcHist([img], [1], None, [256], [0,256]).reshape(256,)
    v_hist = calcHist([img], [2], None, [256], [0,256]).reshape(256,)
    
    # The 7 features consist of:
    # + mean pixel value of the H channel
    # + dissimilarity with the H channel of the "max" image
    # + dissimilarity with the H channel of the "min" image
    # + mean pixel value of the S channel
    # + dissimilarity with the S channel of the "max" image
    # + dissimilarity with the S channel of the "min" image
    # + correlation between the histograms of the H and S channels
    hMean = np_mean(hchan)/255
    DPV_h_max = np_sum(np_absolute(h_hist - h_max))/(HEIGHT*WIDTH)
    DPV_h_min = np_sum(np_absolute(h_hist - h_min))/(HEIGHT*WIDTH)
    
    sMean = np_mean(schan)/255
    DPV_s_max = np_sum(np_absolute(s_hist - s_max))/(HEIGHT*WIDTH)
    DPV_s_min = np_sum(np_absolute(s_hist - s_min))/(HEIGHT*WIDTH)
    
    vMean = np_mean(vchan)/255
    DPV_v_max = np_sum(np_absolute(v_hist - v_max))/(HEIGHT*WIDTH)
    DPV_v_min = np_sum(np_absolute(v_hist - v_min))/(HEIGHT*WIDTH)

    correlation = np_corrcoef(h_hist, s_hist)[0][1]
    
    #image_feature = np_array((hMean, DPV_h_max, DPV_h_min, sMean, DPV_s_max, DPV_s_min, vMean, DPV_v_max, DPV_v_min))
    image_feature = np_array((hMean, DPV_h_max, DPV_h_min, sMean, DPV_s_max, DPV_s_min, correlation))
    image_feature = from_numpy(image_feature).to(DEVICE).float().view(1, input_size)

    with no_grad():
        out_predict = model(image_feature)
        
    # Round to xx.xx % and clamp to the [1.0, 99.99] display range
    percentage_result = np_round(out_predict.item() * 99, 2)
    if percentage_result > 99.99:
        percentage_result = 99.99
    if percentage_result < 1.0:
        percentage_result = 1.0
    # Processed time 
    processedTime = np_round(time()-start_time, 2)

    return percentage_result, processedTime
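
Both predict_image variants above rely on module-level names their snippets never define. A minimal setup sketch; every value here is an assumption except where the code implies it (the 7-element feature vector, and the 4000x6000 resize cropped to 3000x4500):

# Hypothetical module-level setup for the predict_image examples.
from time import time
from os import path as os_path
from numpy import (load as np_load, round as np_round, mean as np_mean,
                   sum as np_sum, absolute as np_absolute,
                   array as np_array, corrcoef as np_corrcoef)
from cv2 import imread, resize, cvtColor, split, calcHist, COLOR_BGR2HSV
from torch import from_numpy, load, no_grad, device

HEIGHT, WIDTH = 3000, 4500  # image shape after the [500:-500, 750:-750] crop
input_size = 7              # length of the feature vector built above
num_classes = 4             # Example 1 scores against Tensor([[1, 33, 66, 99]])
DEVICE = device('cpu')      # Example 18 (Example 1 uses lowercase `device`)
# NeuralNet(input_size, hidden_size, num_classes) is assumed to be defined
# elsewhere in the project.
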