Exemple #1
0
    def verify_account(self):
        """Validate the login form and surface per-field error messages.

        Clears any previous error labels, then checks that both fields are
        filled and that the entered credentials appear in the loaded
        account data.
        """
        self.usernameError.setText('')
        self.passwordError.setText('')

        data = LoadData()

        username = self.usernameInput.text()
        password = self.passwordInput.text()

        # BUG FIX: the original compared the password *widget* to ''
        # (self.passwordInput != '') instead of its text, so the outer
        # guard was always true and the empty-field branches below it
        # were unreachable. Report each missing field independently.
        if username == '':
            self.usernameError.setText('Please enter a username')
            self.usernameError.adjustSize()

        if password == '':
            self.passwordError.setText('Please enter a password')
            self.passwordError.adjustSize()

        if username != '' and password != '':
            if username not in data.username_list:
                self.usernameError.setText('Please check your username')
                self.usernameError.adjustSize()
            elif password not in data.password_list:
                # NOTE(review): this accepts a password belonging to ANY
                # account, not just this username's — confirm how LoadData
                # pairs usernames with passwords.
                self.passwordError.setText('You got the wrong password')
                self.passwordError.adjustSize()
Exemple #2
0
# Global variables shared by the training routines below.
l_value = [4]
# Decision thresholds 0.1 .. 0.9 and train/test split percentages 10 .. 90.
thresholds = [i / 10 for i in range(1, 10)]
split = [10 * i for i in range(1, 10)]

m = 0
train1, train0 = [], []
data_vector = []
test1, test0 = [], []
weights = []
weights_history = []
dev_data, temp = [], []
data = LoadData()


def logistic_function(x, w):
    """Sigmoid of the weighted feature sum.

    The last element of x is the category label (y) and is excluded from
    the dot product. expit computes 1.0 / (1 + np.exp(-np.dot(w, x[:-1])))
    without triggering an overflow warning.
    """
    features = x[:-1]
    activation = np.dot(w, features)
    return expit(activation)


# last element of x[] = category (or y)


def train(train_total):  # train
    global train0, train1, m, data_vector, weights, dev_data, temp
    if len(temp) == 0:
        data.createDictionary()
Exemple #3
0
        logger.info("Fold %i Accuracy: %.4f", i,
                    model.score(X[valid_idx], y[valid_idx]))
        res[valid_idx, :] = model.predict_proba(X[valid_idx])
        logger.info("Fold %i Log Loss: %.4f", i,
                    log_loss(y[valid_idx], res[valid_idx]))
        i += 1
        if short: break
    if short: return -log_loss(y[valid_idx], res[valid_idx])
    yhat = np.argmax(res, axis=1) + 1
    Y = np.array([int(i[-1]) for i in y])
    logger.info("CV Accuracy: %.5f", accuracy_score(Y, yhat))
    logger.info("CV Log Loss: %.4f", log_loss(y, res))
    return res, -log_loss(y, res)


# Keep only the labels from the loaded triple; discard the other elements.
_ignored, y, _rest = LoadData()
del _ignored, _rest


def OptSVC(C, gamma):
    """Build an SVC with the given hyper-parameters and return its
    cross-validation report against the shared labels ``y``."""
    classifier = SVC(C=C, gamma=gamma, probability=True)
    return ReportPerfCV(classifier, "text", y)


def OptBTC(step_size=.5,
           max_iterations=100,
           row_subsample=.9,
           column_subsample=.9,
           max_depth=8):
    model = BoostedTreesClassifier(step_size=step_size,
                                   max_iterations=max_iterations,
Exemple #4
0
 def load_data(self):
     """Load the dataset and store the two input streams and the labels
     as attributes on self."""
     print('Loading Dataset...')
     loader = LoadData()
     self.x_right, self.x_left, self.y_train = loader.load()
Exemple #5
0
from model import CreateModel
from data import LoadData
from keras.callbacks import ModelCheckpoint
import tensorflowjs as tfjs
import matplotlib.pyplot as plt

# Build the model, configure it for binary classification, then fetch the
# training and validation data generators.
model = CreateModel()
model.compile(
    optimizer='sgd',
    loss='binary_crossentropy',
    metrics=['binary_accuracy'],
)
training_data, validation_data = LoadData()
#
## train model
#save_model = ModelCheckpoint('model.h5', save_best_only=True, save_weights_only=True, verbose=1)
#print(f'data generator length: {len(training_data)}')
#model.fit_generator(
#        training_data,
#        validation_data=validation_data,
#        epochs=2,
#        callbacks=[save_model])
#
## fine tuning
#for layer in base_model.layers[:-5]:
#  layer.trainable = False
#model.compile(optimizer='sgd', loss='binary_crossentropy', metrics=['binary_accuracy'])
#model.fit_generator(
#        training_data,
#        validation_data=validation_data,
#        epochs=2,
#        callbacks=[save_model])
## save model
Exemple #6
0
# # In[7]:

# class LabelTransform():
#     def __init__(self, classes):
#         self.classes = classes
#     def transform(self, label):
#         label = self.classes[label]
#         return label

# In[10]:
# Expected to sit next to this script — TODO confirm working directory.
path = './iris.csv'
# Map each iris species name to an integer class id.
classes = {"Iris-setosa": 0, "Iris-versicolor": 1, "Iris-virginica": 2}

# Wire the CSV dataset through the label transform into the data loader.
labeltransform = LabelTransform(classes)
dataset_return = DatasetCSV(path, labeltransform)
load_data_obj = LoadData(dataset_return)

load_data_obj.prepare_data()

print()
# idxs = list(range(len(dataset_return)))
# random.shuffle(idxs)
# train_idx = int(len(dataset_return) * 0.6)
# cv_idx = train_idx + int(len(dataset_return) * 0.2)
# train = idxs[: train_idx]
# cv = idxs[train_idx: cv_idx]
# test = idxs[cv_idx:]
# # In[ ]:

# train_dataset = Subset(dataset_return, train)
# cv_dataset = Subset(dataset_return, cv)