Example #1
def plot_param_learning_curves():
    x_data, _y, full_data = data_get.get_data('as7262 mango', average=False)
    pls = PLS(n_components=6)  # PLS is presumably sklearn's PLSRegression; the import is not shown in this extract
    print(full_data.columns)
    currents = full_data['LED current'].unique()
    times = full_data['integration time'].unique()
    print(currents, times)
    print(full_data['saturation check'].unique())
    figure, axes = plt.subplots(len(currents),
                                len(times),
                                figsize=(9, 12),
                                constrained_layout=True)

    figure.suptitle("Parameter scan of new AS7262 Mango data")
    # figure.suptitle("Gradient Boosting Regressor fit\nAS7262 Betel data")
    # axes_ = [axes[0][0], axes[0][1], axes[0][2], axes[0][3],
    #          axes[1][0], axes[1][1], axes[1][2], axes[1][3],
    #          axes[2][0], axes[2][1], axes[2][2], axes[2][3],
    #          axes[3][0], axes[3][1], axes[3][2], axes[3][3],
    #          axes[4][0], axes[4][1], axes[4][2], axes[4][3],]

    current_i = 0
    time_i = 0
    for current in currents:
        for time in times:
            X, Y = data_get.get_data("as7262 mango",
                                     integration_time=time,
                                     led_current=current,
                                     return_type="XY")

            X = StandardScaler().fit_transform(X)
            X = PolynomialFeatures().fit_transform(X)

            Y = Y['Total Chlorophyll (µg/mg)']
            title = str(time * 2.8) + " ms " + current
            print(title)

            plot_learning_curve(pls,
                                title,
                                X,
                                Y,
                                cv=cv,
                                ax=axes[current_i][time_i],
                                ylim=[-0.3, -.1])

            time_i += 1
        time_i = 0
        current_i += 1
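
# Hedged sketch (not part of the original snippet): plot_learning_curve is called above
# but never defined in this extract.  A minimal helper with the assumed signature,
# built on sklearn.model_selection.learning_curve, could look like this.
import numpy as np
from sklearn.model_selection import learning_curve

def plot_learning_curve(estimator, title, X, y, cv=None, ax=None, ylim=None):
    """Plot mean train and cross-validation scores against training-set size."""
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, scoring="neg_mean_absolute_error",
        train_sizes=np.linspace(0.1, 1.0, 5))
    ax.plot(train_sizes, train_scores.mean(axis=1), 'o-', label="training score")
    ax.plot(train_sizes, test_scores.mean(axis=1), 'o-', label="cross-validation score")
    if ylim:
        ax.set_ylim(*ylim)
    ax.set_title(title)
    ax.legend(loc="best")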
Example #2
def call(self):
    # create the discriminator and generator models
    self.d_model = Discriminator(y_input=self.y_input,
                                 x_input=self.x_input,
                                 n_filters=self.gfilters / (2**self.n_convols),
                                 n_convols=self.n_convols,
                                 rgb=self.rgb).build()
    self.g_model = Generator(self.y_input,
                             self.x_input,
                             n_filters=self.gfilters,
                             n_convols=self.n_convols,
                             rgb=self.rgb).build()
    # create the GAN from the discriminator and generator models
    self.infogan = GAN(self.d_model,
                       self.g_model,
                       self.results_directory,
                       noise_size=62,
                       relu_alpha=self.relu_alpha)
    # compile the GAN with separate Adam optimizers for the discriminator and generator
    self.infogan.compile(
        d_optimizer=keras.optimizers.Adam(learning_rate=self.d_learn),
        g_optimizer=keras.optimizers.Adam(learning_rate=self.g_learn))
    # prepare the training data
    self.X = get_data(self.image_directory, self.y_input, self.x_input,
                      self.batch_size, self.rgb, self.n_convols)
Example #3
"""

__author__ = "Kyle Vitatus Lopin"

import numpy as np
import pandas as pd
from sklearn.model_selection import cross_validate, RepeatedKFold
from sklearn.pipeline import make_pipeline

# local files
import data_get
import full_regrs
import processing

filename = "as7262 new mango results.xlsx"
x_data, _y, data = data_get.get_data('as7262 mango')

all_regressors = full_regrs.get_all_regrs()
all_transformers = full_regrs.get_transformers()
print(all_regressors)

print(x_data)

chloro_types = [
    'Chlorophyll a (µg/mg)', 'Chlorophyll b (µg/mg)',
    'Total Chlorophyll (µg/mg)'
]

_y = _y['Total Chlorophyll (µg/mg)']
print(_y)
cv = RepeatedKFold(n_splits=4, n_repeats=15)
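
# Hedged continuation (not in the source): the extract stops before the regressors are
# scored.  Assuming all_regressors maps names to unfitted estimators, a typical loop
# with the cross-validation splitter defined above might be:
results = {}
for name, regr in all_regressors.items():
    cv_scores = cross_validate(make_pipeline(regr), x_data, _y, cv=cv,
                               scoring="neg_mean_absolute_error")
    results[name] = cv_scores["test_score"].mean()
print(pd.Series(results).sort_values())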
Example #4
# Copyright (c) 2019 Kyle Lopin (Naresuan University) <*****@*****.**>

"""

"""

__author__ = "Kyle Vitatus Lopin"


import pandas as pd

from sklearn.linear_model import LassoCV
from sklearn.model_selection import cross_validate, ShuffleSplit
from sklearn.preprocessing import PolynomialFeatures
# local files
import data_get

est = LassoCV()
cv = ShuffleSplit(n_splits=100)
# single read
for read in [1, 2, 3]:
    X, Y = data_get.get_data("as7262 mango", integration_time=200, current="25 mA",
                             position=read, return_type="XY")
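
    # Hedged continuation (not in the source): the loop body is cut off here.  One
    # plausible step is to score the LassoCV model on this read with the ShuffleSplit
    # splitter defined above (the Y column name is assumed from the other examples).
    y = Y['Total Chlorophyll (µg/mg)']
    x_poly = PolynomialFeatures().fit_transform(X)
    cv_scores = cross_validate(est, x_poly, y, cv=cv,
                               scoring="neg_mean_absolute_error")
    print(f"read {read}: {cv_scores['test_score'].mean():.3f}")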

Example #5
import matplotlib.pyplot as plt  # used below (plt.style.use) but missing from this extract

from sklearn.linear_model import LassoCV, SGDRegressor
from sklearn.model_selection import LeaveOneOut, LeaveOneGroupOut
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, FunctionTransformer, PolynomialFeatures
# local files
import data_get
import processing
import sfs

plt.style.use('seaborn')

# fitting_data = pd.read_csv('as7262_roseapple.csv')
x_data, _, fitting_data = data_get.get_data('as7262 mango',
                                            integration_time=150,
                                            led_current="12.5 mA",
                                            average=False)
# x_data, _, fitting_data = data_get.get_data('as7262 mango', integration_time=200,
#                                             led_current="12.5 mA",
#                                             read_number=2)
# x_data1, _, fitting_data2 = data_get.get_data('as7262 mango', integration_time=50,
#                                             led_current="12.5 mA",
#                                             read_number=2)
# x_data2, _, fitting_data3 = data_get.get_data('as7262 mango', integration_time=150,
#                                             led_current="25 mA",
#                                             read_number=2)
#
# x_data = pd.concat([x_data, x_data1, x_data2], axis=1)
# print(fitting_data.columns)
# x_data["Leaf number"] = fitting_data['Leaf number']
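
# Hedged continuation (not in the source): the extract stops before any model is fit.
# One evaluation consistent with the imports above is a leave-one-leaf-out test of a
# scaled polynomial SVR pipeline (the chlorophyll column name and scoring choice are assumptions).
from sklearn.model_selection import cross_val_score

pipe = make_pipeline(StandardScaler(), PolynomialFeatures(degree=2), SVR())
groups = fitting_data['Leaf number']
y = fitting_data['Total Chlorophyll (µg/mg)']
scores = cross_val_score(pipe, x_data, y, cv=LeaveOneGroupOut(), groups=groups,
                         scoring="neg_mean_absolute_error")
print(scores.mean())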
Example #6
# installed libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# local files
import data_get
import processing

plt.style.use('seaborn')

# data = pd.read_csv("as7262_mango.csv")
# x_data, data = data_getter.get_data('as7262 roseapple')
x_data, _, data = data_get.get_data(
    'as7262 mango',
    integration_time=200,
    led_current="50 mA",
)
print(data)
print(data.columns)
# data = data.loc[(data['position'] == 'pos 2')]
# data = data.loc[(data['integration time'] == 3)]
# data = data.groupby('Leaf number', as_index=True).mean()

accent_column = data['Total Chlorophyll (µg/mg)'].to_numpy()
# accent_column = np.ones_like(data.iloc[:, 1])
accent_column = accent_column / max(accent_column)
# print(accent_column/max(accent_column))

# alphas = np.linspace(0.1, 1, 10)
colors = np.zeros((data.shape[0], 4))
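
# Hedged continuation (not in the source): colors is sized for one RGBA value per row,
# so a plausible next step is to map the normalized chlorophyll values onto the alpha
# channel and draw each spectrum with its own color (the channel choices are illustrative).
colors[:, 2] = 0.6                # fixed blue component
colors[:, 3] = accent_column      # transparency follows total chlorophyll
wavelength_columns = [col for col in data.columns if 'nm' in col]
for i, (_, row) in enumerate(data[wavelength_columns].iterrows()):
    plt.plot(row.values, color=colors[i])
plt.xlabel("wavelength channel")
plt.ylabel("sensor counts")
plt.show()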
Example #7
import matplotlib.pyplot as plt  # used below (plt.style.use) but missing from this extract
from sklearn.linear_model import LassoCV, SGDRegressor, LinearRegression
from sklearn.model_selection import RepeatedKFold
from sklearn.neural_network import MLPRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.svm import SVR
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, RobustScaler, FunctionTransformer, PolynomialFeatures
# local files
import data_get


plt.style.use('seaborn')

# fitting_data = pd.read_csv('as7262_roseapple.csv')
x_data, _, fitting_data = data_get.get_data('as7262 mango', integration_time=150,
                                            led_current="12.5 mA",
                                            read_number=2)

# regr = PLSRegression(n_components=9)
# regr = LassoCV(max_iter=5000)
regr = LinearRegression()


y = fitting_data['Total Chlorophyll (µg/cm2)']

x_scaled_np = StandardScaler().fit_transform(x_data)
x_scaled_np = PolynomialFeatures(degree=2).fit_transform(x_scaled_np)

print(y)
print(x_scaled_np)
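
# Hedged continuation (not in the source): the extract stops before the regression is
# evaluated.  A repeated K-fold cross-validation of the linear model on the scaled
# polynomial features would fit the imports above (cross_validate added here).
from sklearn.model_selection import cross_validate

cv = RepeatedKFold(n_splits=5, n_repeats=10)
cv_scores = cross_validate(regr, x_scaled_np, y, cv=cv,
                           scoring="neg_mean_absolute_error")
print(cv_scores["test_score"].mean())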
Example #8
# installed libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cross_decomposition import PLSRegression
from sklearn import linear_model
from sklearn.model_selection import cross_validate, GroupShuffleSplit, RepeatedKFold
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
# local files
import data_get

plt.style.use('seaborn')

x_data, _y, full_data = data_get.get_data('as7262 mango', average=False)

print(full_data.columns)
currents = full_data['LED current'].unique()
times = full_data['integration time'].unique()
print(currents, times)
print(full_data['saturation check'].unique())
pls = PLSRegression(n_components=6)
# pls = linear_model.LinearRegression()
cv = RepeatedKFold(n_splits=5, n_repeats=20)
cv_group = GroupShuffleSplit(n_splits=200)
scores = []
labels = []
errors = []
training_scores = []
training_errors = []
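
# Hedged continuation (not in the source): the empty lists above suggest a scan over
# every LED current / integration time combination, collecting train and test scores
# for the PLS pipeline.  The loop below mirrors Example #1 and is an assumption,
# not the original code.
for current in currents:
    for time in times:
        X, Y = data_get.get_data("as7262 mango", integration_time=time,
                                 led_current=current, return_type="XY")
        y = Y['Total Chlorophyll (µg/mg)']
        pipe = make_pipeline(StandardScaler(), PolynomialFeatures(), pls)
        result = cross_validate(pipe, X, y, cv=cv, scoring="neg_mean_absolute_error",
                                return_train_score=True)
        labels.append(f"{time} / {current}")
        scores.append(result["test_score"].mean())
        errors.append(result["test_score"].std())
        training_scores.append(result["train_score"].mean())
        training_errors.append(result["train_score"].std())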
Example #9
# installed libraries
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
from sklearn.metrics import mean_absolute_error, r2_score
from sklearn.preprocessing import RobustScaler, StandardScaler
# local files
import data_get
import processing

plt.style.use('seaborn')

# fitting_data = pd.read_csv('as7262_roseapple.csv')
# fitting_data = pd.read_csv("as7262_ylang.csv")
x_data, _y, fitting_data = data_get.get_data('as7262 mango')
print(fitting_data.columns)
# fitting_data = fitting_data.loc[(fitting_data['Total Chlorophyll (ug/ml)'] < 0.5)]
# fitting_data = fitting_data.loc[(fitting_data['LED current'] == 2)]
# fitting_data = fitting_data.loc[(fitting_data['position'] == 'pos 2')]
# print(fitting_data)
fitting_data = fitting_data.groupby('Leaf number', as_index=True).mean()
#
# fitting_data = fitting_data.drop(["Leaf: 50"])

data_columns = []
for column in fitting_data.columns:
    if 'nm' in column:
        data_columns.append(column)
#
spectrum_data = fitting_data[data_columns]
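
# Hedged continuation (not in the source): the curve_fit / metric imports above suggest
# fitting one sensor channel against chlorophyll.  A sketch with an assumed exponential
# model and an assumed target-column name:
def exp_decay(x, a, b, c):
    return a * np.exp(-b * x) + c

y = fitting_data['Total Chlorophyll (µg/mg)']
x = spectrum_data[data_columns[0]]
popt, _ = curve_fit(exp_decay, x, y, maxfev=10000)
y_fit = exp_decay(x, *popt)
print("MAE:", mean_absolute_error(y, y_fit),
      "R2:", r2_score(y, y_fit))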