Example No. 1
def TrainingButton():
    start = time.time()

    #train_model(percent, 1, 4)  # for eyes
    train_model(percent, 1, 2)  # for mouth

    end = time.time()
    print('Training took {:.1f} seconds'.format(end - start))
Example No. 2
    def create_model(self, train_path, acc=0.92, nb_epoch=50, test_path=None):
        """
       创建新模型
       """
        model_names = [
            fname.split('/')[-1].split('.')[0] for fname in os.listdir('model')
        ]
        if self.model_name in model_names:
            raise NameError(
                "model name %s has existed! please change another one." %
                model_name)

        model, ctc_model = build_model(self.width, self.height, self.code_len,
                                       self.n_class)

        model = train_model(train_path,
                            self.ctable,
                            model,
                            ctc_model,
                            self.model_name,
                            self.code_len,
                            acc=acc,
                            nb_epoch=nb_epoch)
        model.save('model/%s.h5' % self.model_name)
        return model
Example No. 3
def test_train_model():
    X_train = np.array([1, 2, 3, 4, 5, 6]).reshape(-1, 1)
    y_train = np.array([10, 9, 8, 8, 6, 5])
    data = {"train": {"X": X_train, "y": y_train}}

    reg_model = train_model(data, {"alpha": 1.2})

    preds = reg_model.predict([[1], [2]])
    np.testing.assert_almost_equal(preds, [9.93939393939394, 9.03030303030303])
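The expected predictions here match exactly what scikit-learn's Ridge(alpha=1.2) produces on this data, so a minimal train_model that passes this test could look like the sketch below; this is an illustration, not necessarily the project's actual implementation:

from sklearn.linear_model import Ridge

def train_model(data, args):
    # Sketch: fit a ridge regression on the training split.
    reg = Ridge(alpha=args["alpha"])
    reg.fit(data["train"]["X"], data["train"]["y"])
    return reg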
Example No. 4
def iterate(number, train_sents, test_sents, unlabeled_sents, fdict, gazetteer_list):

    model_name = str(number) + ".model"
    new_model_name = str(number + 1) + ".model"

    print('####round####: ', number)

    if number == 0:
        count = 0
        for k, v in gazetteer_list.items():
            count += len(v)
            print(k, len(v))
        print('total : ', count)
        train_x, train_y, test_x, test_y = get_train_test_pair(train_sents, test_sents, fdict, gazetteer_list)
        train_model(train_x, train_y, new_model_name, {})
        pred_y = tagging(new_model_name, test_x)

        print_classification_report(test_y, pred_y)

        return new_model_name

    else:
        unlabeled_x = [sent2feature(sent, fdict, gazetteer_list) for sent in unlabeled_sents]
        pred_utrain_y = tagging(model_name, unlabeled_x)
        update_gazetteer(gazetteer_list, unlabeled_x, pred_utrain_y, 0)

        count = 0
        for k, v in gazetteer_list.items():
            count += len(v)
            print(k, len(v))
        print('total : ', count)

        unlabeled_x = [sent2feature(sent, fdict, gazetteer_list) for sent in unlabeled_sents]
        train_x, train_y, test_x, test_y = get_train_test_pair(train_sents, test_sents, fdict, gazetteer_list)
        new_train_x, new_train_y = mix_labeled_unlabeled_data(train_x, train_y, unlabeled_x, pred_utrain_y)

        train_model(new_train_x, new_train_y, new_model_name, {})

        new_test_x = [sent2feature(sent, fdict, gazetteer_list) for sent in test_sents]
        pred_y2 = tagging(new_model_name, new_test_x)

        print_classification_report(test_y, pred_y2)

        return new_model_name
Example No. 5
def main():
    # GPU Setting
    device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")  # CUDA Setting

    # Data Loader
    image_datasets, dataloaders, dataset_sizes = data_loader_make(
        image_path_train, image_path_val, data_path_train, data_path_val,
        data_transforms, shuffle, batch_size, resize_pixel)
    class_names = sorted(list(set(image_datasets['train'].label)))
    class_to_idx = {cl: i for i, cl in enumerate(class_names)}
    idx_to_class = {v: k for k, v in class_to_idx.items()}

    # Load model
    model_conv = models.resnet18(pretrained=False)
    num_features = model_conv.fc.in_features
    model_conv.fc = nn.Linear(num_features, class_num)
    #print('Our model')
    #print(model_conv)

    # Sorting Model
    bilinear = torch.nn.Bilinear(1, 1, 1)
    for param in bilinear.parameters():
        param.requires_grad = True  # was 'require_grad', a typo that silently creates an unused attribute

    criterion = nn.CrossEntropyLoss()
    criterion2 = nn.CrossEntropyLoss(reduction='none')  # per-sample losses; the old 'reduce=False' flag is deprecated
    # Observe that only parameters of final layer are being optimized as
    # opposed to before.
    optimizer_conv = optimizer_choice(optimizer_method, model_conv,
                                      learning_rate)
    optimizer_order = optim.Adam(filter(lambda p: p.requires_grad,
                                        bilinear.parameters()),
                                 lr=learning_rate)
    lr_step_scheduler = lr_scheduler.StepLR(
        optimizer_conv, step_size=20,
        gamma=0.1)  # Decay LR by a factor of 0.1 every step_size
    nn.utils.clip_grad_norm_(model_conv.parameters(),
                             max_grad_norm)  # Gradient clipping (note: clipping here runs once; to take effect it must be applied after every backward pass)
    model_conv, train_error_list, val_error_list = train_model(
        method, model_conv, dataloaders, dataset_sizes, criterion, criterion2,
        optimizer_conv, lr_step_scheduler, class_to_idx, image_datasets,
        batch_size, order_count, ascending, num_epochs, aug_percent)

    # Model Save
    print('Result Saving...')
    # File Directory Setting
    if not os.path.isdir('./result_visualization/{}'.format(seed_time)):
        os.mkdir('./result_visualization/{}'.format(seed_time))
    pd.DataFrame({
        'train_error': train_error_list,
        'val_error': val_error_list
    }).to_csv('./result_visualization/{}/results_{}_{}_{}_{}_{}.csv'.format(
        seed_time, method, order_count, seed_time, ascending, aug_percent))
    print('Done!')
Example No. 6
    def test_train_model_with_empty_list(self) -> None:  # pylint: disable=R0201
        """ Test train_model() with no data.
        """
        # Given
        train_inputs: List[pd.DataFrame] = []

        # When
        model: Model = train_model(inputs=train_inputs,
                                   epochs=1,
                                   batch_size=1)

        # Then
        self.assertIsNotNone(model)
Example No. 7
    def pass_arguments_for_training(self):
        embedding_model_path = self.embeddingModelLineEdit.text()
        if embedding_model_path == "":
            self.display_error_message('Please, select embedding model for training!')
            return

        data_set_directory = self.dataSetDirectoryLineEdit.text()
        if data_set_directory == "":
            self.display_error_message('Please, select data set directory!')
            return

        vector_size = self.vectorSizeSpinBox.value()
        if vector_size <= 0:
            self.display_error_message('The vector size must be greater than zero!')
            return

        kernel_size = self.kernelSizeSpinBox.value()
        if kernel_size <= 0:
            self.display_error_message('The kernel size must be greater than zero!')
            return

        nb_filters = self.FiltersNumberSpinBox.value()
        dense_output = self.denseOutputSpinBox.value()
        pool_size = self.poolSizeSpinBox.value()
        epochs = self.epochsSpinBox.value()
        learning_rate = self.learningRateSpinBox.value()
        momentum = self.momentumSpinBox.value()
        decay = self.decaySpinBox.value()
        dropout = self.dropoutSpinBox.value()
        training_percentage = self.trainingValidationSlider.value()
        training_percentage = training_percentage / 100.0

        self.is_training = 1

        X, Y = prepare_input(self, embedding_model_path, data_set_directory, int(vector_size))
        if X is None or Y is None:
            self.is_training = 0
            self.trainingSummaryTextEdit.clear()
            self.trainingSummaryTextEdit.repaint()
            return

        self.trained_model = train_model(self, X, Y, int(vector_size), int(kernel_size),
                                         int(nb_filters), int(pool_size), int(dense_output), learning_rate,
                                         momentum, decay, int(epochs), dropout, training_percentage)

        self.display_information_message('The training has been finished!')
        self.is_training = 0

        if self.trained_model is not None:
            self.saveModelButton.setEnabled(True)
Example No. 8
    def train(self, dt):
        Clock.unschedule(self.train)
        train_model(90, 1, 4)
        train_model(90, 1, 2)
        self.eyeAccuracy = validate_model(90, 0, 4)
        self.mouthAccuracy = validate_model(90, 0, 2)

        if (self.eyeAccuracy < 80) or (self.mouthAccuracy < 80):
            failLayout = FloatLayout(size=(500, 300))
            self.countdown = Label(
                id='countdown',
                text='Failed, Collecting New Samples in 5 Seconds',
                pos_hint={
                    'x': 0,
                    'center_y': .5
                },
                font_size='20sp')
            failLayout.add_widget(self.countdown)
            self.clear_widgets()
            self.add_widget(failLayout)
            self.time = 5
            Clock.schedule_interval(self.update, 1)
        else:
            self.manager.current = 'smartcontrol'
Example No. 9
    def update_model(self, train_path, from_gne=True, nb_epoch=50, test_path=None):
        """
        Fine-tune based on an existing model.
        """
        model_names = [fname.split('/')[-1].split('.')[0] for fname in os.listdir('model')]
        if self.model_name not in model_names:
            raise NameError("model name %s does not exist! please create the model first." % self.model_name)

        model, ctc_model = build_model(self.width, self.height, self.code_len, self.n_class)
        if from_gne:
            ctc_model.load_weights("model/gne_weights.h5")
        else:
            ctc_model.load_weights("model/%s_weights.h5" % self.model_name)

        model = train_model(train_path, self.ctable, model, ctc_model, self.model_name, code_len=self.code_len, nb_epoch=nb_epoch, test_path=test_path)
        model.save('model/%s.h5' % self.model_name)
        return model
Example No. 10
    def test_all_pipeline_with_none(self) -> None:  # pylint: disable=R0201
        """ Test prepare_dataset() with no data.
        """
        # Given
        input_raw: pd.DataFrame = pd.DataFrame(
            data=[1, 2, 3])  # a set literal ({1, 2, 3}) would raise here, since sets are unordered
        train_inputs: List[pd.DataFrame] = []
        validate_files: List[str] = []

        # When
        train_inputs.append(build_features(prepare_dataset(input_raw=input_raw)))
        model: Model = train_model(inputs=train_inputs,
                                   epochs=1,
                                   batch_size=1)
        metrics: Mapping[str,Any] = evaluate_model(model, validate_files)

        # Then
        self.assertGreater(metrics['auc'], 0.8)
Example No. 11
    def post(self):
        #data = request.get_json()
        #image_data = data["image_data"]
        #filename = data["filename"]
        #direction = data["direction"]

        #decode_image(image_data)
        #if direction == "right":
        #	flip_image()
        try:
            accuracy = train_model(8, 6, 0.15, '_vgg_16_2x20')
            return jsonify({
                "message":
                "The model was trained successfully with an accuracy of {}".format(accuracy)
            })
        except Exception:
            return jsonify({
                "message":
                "An error occurred while training the model. Try again later."
            })
Example No. 12
    def update_model(self,
                     train_path,
                     acc=0.92,
                     nb_epoch=50,
                     from_gne=True,
                     test_path=None):
        """
       根据旧模型进行调优
       :type str: train_path 训练数据集路径
       :type float: acc: 训练集准确率停止条件
       :rtype :object model 训练好的预测模型
       """
        model_names = [
            fname.split('/')[-1].split('.')[0] for fname in os.listdir('model')
        ]
        if self.model_name not in model_names:
            raise NameError(
                "model name %s does not exist! please create it first." %
                model_name)

        model, ctc_model = build_model(self.width, self.height, self.code_len,
                                       self.n_class)
        if from_gne:
            ctc_model.load_weights("model/gne_weights.h5")
            for layer in model.layers[:5]:
                layer.trainable = False
        else:
            ctc_model.load_weights("model/%s_weights.h5" % self.model_name)
        model.summary()  # summary() prints the architecture itself and returns None
        model = train_model(train_path,
                            self.ctable,
                            model,
                            ctc_model,
                            self.model_name,
                            self.code_len,
                            acc=acc,
                            nb_epoch=nb_epoch,
                            test_path=test_path)
        model.save('model/%s.h5' % self.model_name)
        return model
Example No. 13
y_train = data.y_train['walking']
opt_flows_train = data.optical_flows_train['walking']
motion_representations_train = data.motion_representations_train['walking']

X_test = data.X_test['walking']
y_test = data.y_test['walking']
opt_flows_test = data.optical_flows_test['walking']

# now, tune the model
learning_rates = [1e-7, 1e-6, 1e-4, 1e-3, 1e-2]
losses = []
best_rate = None
best_loss = np.inf

"""
Runs 100 epochs with different learning rates and writes losses to file
"""

for lr in learning_rates:
    print('Training for learning rate {}'.format(lr))
    epoch_losses = train_model(X_train, y_train, dt,
                               opt_flows_train, motion_representations_train,
                               lr, baseline=False, epochs=100, tune=True,
                               print_every=10)  # lr moved before the keyword args (positional-after-keyword is a SyntaxError)
    print()
    losses.append(epoch_losses)
    if epoch_losses[-1] < best_loss:
        best_loss = epoch_losses[-1]
        best_rate = lr  
    np.save(os.path.dirname(os.getcwd()) + '/tune/{}_lr'.format(lr), epoch_losses)
print('Best learning rate found for 100 epochs of training {}'.format(best_rate))
Example No. 14
import numpy as np
import pandas as pd
import tensorflow as tf
from train_model import *

fold = 1

train_fold = pd.read_csv('train_fold_{}.csv'.format(fold), index_col=0)  # was reading the test fold; likely a copy-paste slip
test_fold = pd.read_csv('test_fold_{}.csv'.format(fold), index_col=0)

cols = [
    'amount', 'oldbalanceOrg', 'newbalanceOrig', 'oldbalanceDest',
    'newbalanceDest'
]

feat_cols = [tf.feature_column.numeric_column(col) for col in cols]

train_model(train_data=train_fold,
            valid_data=test_fold,
            feat_cols=feat_cols,
            fold=fold)
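The train_model used above comes from the project's own train_model module, so its body is not shown. A rough sketch of how such a function might use these feature columns with TF 1.x estimators follows; the 'isFraud' label column is a hypothetical name for illustration, not something the snippet confirms:

def train_model(train_data, valid_data, feat_cols, fold, label='isFraud'):
    # Sketch only: 'isFraud' is an assumed label column.
    train_fn = tf.estimator.inputs.pandas_input_fn(
        x=train_data.drop(columns=[label]), y=train_data[label],
        batch_size=128, num_epochs=10, shuffle=True)
    valid_fn = tf.estimator.inputs.pandas_input_fn(
        x=valid_data.drop(columns=[label]), y=valid_data[label],
        shuffle=False)
    estimator = tf.estimator.LinearClassifier(
        feature_columns=feat_cols, model_dir='model_fold_{}'.format(fold))
    estimator.train(input_fn=train_fn)
    return estimator.evaluate(input_fn=valid_fn)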
Example No. 15
def main():
    for i in list(range(4))[::-1]:
        print(i + 1)
        time.sleep(1)

    game_over = np.load('end_screen.npy')
    paused = False
    filename = "training_data.npy"
    training_data = []
    screen_old = grab_screen(region=(510, 860, 640, 920))
    screen_old = cv2.cvtColor(screen_old, cv2.COLOR_BGR2GRAY)

    while True:
        if not paused:
            screen_game = grab_screen(region=(696, 420, 970, 950))
            screen_game = cv2.cvtColor(screen_game, cv2.COLOR_BGR2GRAY)
            screen = cv2.resize(screen_game, (60, 60))

            # making moves
            with graph.as_default():
                prediction = model.predict([screen.reshape(WIDTH, HEIGHT,
                                                           1)])[0]
                moves = list(np.around(prediction))
                translate_move(moves)

            # recording moves
            keys = key_pressed()
            output = keys_to_output(keys)
            training_data.append([screen, output])

            screen_end = grab_screen(region=(600, 400, 620, 420))
            screen_end = cv2.cvtColor(screen_end, cv2.COLOR_BGR2GRAY)

            screen_score = grab_screen(region=(510, 860, 640, 920))
            screen_score = cv2.cvtColor(screen_score, cv2.COLOR_BGR2GRAY)

            if (screen_old != screen_score).any():
                increase_score()
                screen_old = screen_score

            if (screen_end == game_over).all():
                if check_score() > check_high_score() and check_score() > 2:
                    set_high_score(check_score())
                    K.clear_session()
                    np.save(filename, training_data)
                    # here we will do the re-training
                    balance_data()
                    train_model()
                time.sleep(0.5)
                keyboard.press_and_release('enter')
                keyboard.press_and_release('enter')
                print(check_score())
                training_data = []
                reset_score()

            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break

        if keyboard.is_pressed('p'):
            if paused:
                paused = False
                time.sleep(1)
            else:
                paused = True
Example No. 16
                        help="select your model",
                        default=None)
    parser.add_argument("-d", "--dataset",
                        help="select your dataset")
    parser.add_argument("-c", "--computer",
                        help="select your dataset")

    args = parser.parse_args()
    model1_configuration_name = args.model1
    model2_configuration_name = args.model2
    model3_configuration_name = args.model3
    model4_configuration_name = args.model4
    dataset_configuration_name = args.dataset
    computer_configuration_name = args.computer

    train_model(model1_configuration_name,
                dataset_configuration_name,
                computer_configuration_name)
    if model2_configuration_name:
        train_model(model2_configuration_name,
                    dataset_configuration_name,
                    computer_configuration_name)
    if model3_configuration_name:
        train_model(model3_configuration_name,
                    dataset_configuration_name,
                    computer_configuration_name)
    if model4_configuration_name:
        train_model(model4_configuration_name,
                    dataset_configuration_name,
                    computer_configuration_name)
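The four near-identical calls above could also be written as a loop. A sketch (note one behavioral difference: unlike the original, this also skips model1 when it is None):

for name in (model1_configuration_name, model2_configuration_name,
             model3_configuration_name, model4_configuration_name):
    if name:  # skip configurations that were not passed on the command line
        train_model(name, dataset_configuration_name,
                    computer_configuration_name)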
Example No. 17
from params import *
from print_save import *
import time
import os

os.environ["CUDA_VISIBLE_DEVICES"] = gpu_index
print('GPU_INDEX:  ', gpu_index)

if __name__ == '__main__':
    path_excel = '../experiment_result/' + DATASET_T + '_' + MODEL + '_' + str(
        int(time.time())) + str(int(random.uniform(100, 900))) + '.xlsx'
    para = [
        DATASET_T, DATASET_S, MODEL, LR_REC, LR_DOM_pos, LR_DOM_neg, LAMDA,
        LR_REC_s, LAMDA_s, LAYER, EMB_DIM, BATCH_SIZE, SAMPLE_RATE, N_EPOCH,
        TEST_VALIDATION, TOP_K, OPTIMIZATION, IF_PRETRAIN
    ]
    para_name = [
        'DATASET', 'DATASET_SOURCE', 'MODEL', 'LR_REC', 'LR_DOM_pos',
        'LR_DOM_neg', 'LAMDA', 'LR_REC_s', 'LAMDA_s', 'LAYER', 'EMB_DIM',
        'BATCH_SIZE', 'SAMPLE_RATE', 'N_EPOCH', 'TEST_VALIDATION', 'TOP_K',
        'OPTIMIZATION', 'IF_PRETRAIN'
    ]
    ## print and save model hyperparameters
    print_params(para_name, para)
    save_params(para_name, para, path_excel)
    ## train the model
    train_model(para, path_excel)

    # try: train_model(para, path_excel)
    # except: continue
Example No. 18
from detect_faces import *
from train_model import *
from os import listdir
from os.path import isfile, join
from preprocess_data import *

# Usage:
# 1) Set DATA_DIR in preprocess_data.py to point to folder into which training images are
# 2) Set LABEL_DIR in preprocess_data.py to point to the folder into which labels are
# 3) call process_data
# 4) call train_model
# 5) call detect_faces

#process_data('train_labels.csv', 6)
'''train_model('train_data/data50x50.pkl', 'trained_models/model50x50.pkl',
            cv_fold_evaluation=0, verbose=True)
train_model('train_data/data25x25.pkl', 'trained_models/model25x25.pkl',
             cv_fold_evaluation=0, verbose=True)
'''

test_detection_path = 'test_j/detection_images/images'
onlyfiles = [
    f for f in listdir(test_detection_path)
    if isfile(join(test_detection_path, f))
]

results = []
for file in onlyfiles:
    img_name = join(test_detection_path, file)  # use the imported join; the bare 'os' module was never imported
    print(img_name)
Example No. 19
import os
os.chdir('/Users/julianchu/Desktop/Emotion recognition')
#from data import *
import matplotlib.pyplot as plt
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import json
import pickle
from train_model import *
#%%
# define constants
CLASSES = ['angry', 'fear', 'happy', 'neutral', 'sad',
           'surprise']  #, 'disgust']
NUM_CLASSES = len(CLASSES)
#%%
# load training data
# train_X and train_Y are numpy arrays
train_X = pickle.load(open('processed_data/X_train_new_augmented.pickle',
                           'rb'))
train_Y = pickle.load(open('processed_data/Y_train_new_augmented.pickle',
                           'rb'))
val_X = pickle.load(open('processed_data/X_val_new.pickle', 'rb'))
val_Y = pickle.load(open('processed_data/Y_val_new.pickle', 'rb'))
#print('showing image with label ', CLASSES[train_Y[0]])
#plt.imshow(train_X[0])
train_Y = keras.utils.to_categorical(train_Y, NUM_CLASSES)
val_Y = keras.utils.to_categorical(val_Y, NUM_CLASSES)
#%%
# train different models and save them
Example No. 20
raw_data_path = f"raw_data_with_{FEATURE_TYPE}.pickle"

# Load audio segments using pydub
# raw_data = load_raw_audio(FEATURE_TYPE)
# with open(raw_data_path, 'wb') as f:
#     pickle.dump(raw_data, f)
# exit(0)

with open(raw_data_path, 'rb') as f:
    raw_data = pickle.load(f)

print("background len: " + str(len(raw_data.backgrounds[0])))
print("number of keyword: " + str(len(raw_data.keywords.keys())))
print("keywords[0] len: " + str(len(raw_data.keywords['on'][0].audio)))
print("keywords[1] len: " + str(len(raw_data.keywords['off'][0].audio)))
# print(f"mean:{raw_data.mean.shape} std:{raw_data.std.shape}")

weight_param_path = f"model/kmn_cnnbidirectctc_{FEATURE_TYPE}.weights.best.hdf5"
# model_dilation
model = build_model(weight_param_path, create_model=create_model_cnn_bidirect)
model.summary()
# model_dilation
np.random.seed(19)
set_random_seed(19)

for i in range(0, 4):
    print(f"num:{i}")
    train_model(model, raw_data, weight_param_path,
                detect_wakeword=False,
                feature_type=FEATURE_TYPE)
Example No. 21
## author @Wenhui Yu, [email protected]

from train_model import *
from params import *
from print_save import *
import time
import os

os.environ["CUDA_VISIBLE_DEVICES"] = GPU_INDEX
print('GPU INDEX: ', GPU_INDEX)

# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

if __name__ == '__main__':
    path_excel = '../experiment_result/' + DATASET + '_' + MODEL + '_' + str(
        int(time.time())) + str(int(random.uniform(100, 900))) + '.xlsx'
    para = [
        DATASET, MODEL, LR, LAMDA, EMB_DIM, BATCH_SIZE, SAMPLE_RATE, N_EPOCH,
        TEST_VALIDATION, TOP_K
    ]
    para_name = [
        'DATASET', 'MODEL', 'LR', 'LAMDA', 'EMB_DIM', 'BATCH_SIZE',
        'SAMPLE_RATE', 'N_EPOCH', 'TEST_VALIDATION', 'TOP_K'
    ]
    ## print and save model hyperparameters
    print_params(para_name, para)
    save_params(para_name, para, path_excel)
    ## train the model
    train_model(para, path_excel, IF_SAVE_EMB)
Example No. 22
    '''
    Training Config
    '''
    BATCH_SIZE = 30
    LR = 0.001
    EPOCH = 100
    Pretrained = False
    NUM_CLASSES = 190

    root = "../DatasetA/"

    dataloaders = getImageLoaders(root, BATCH_SIZE)

    # model_ft = ClassificationModelResenet18(pretrained=Pretrained, NUM_CLASSES=NUM_CLASSES)
    model_ft = models.resnet152(num_classes=NUM_CLASSES)
    # model_ft = models.inception_v3(num_classes=NUM_CLASSES)
    # model_ft = models.resnet18()
    # num_ftrs = model_ft.fc.in_features
    # model_ft.fc = nn.Linear(num_ftrs, NUM_CLASSES)

    criterion = nn.CrossEntropyLoss()

    # As you can see, all parameters will be optimized
    optimizer_ft = optim.SGD(model_ft.parameters(), lr=LR, momentum=0.9)

    model_ft = train_model(model_ft, criterion, optimizer_ft, dataloaders,
                           num_epochs=EPOCH)


Example No. 23
## For latent embedding pre-training
## author@Wenhui Yu  2020.06.02
## email: [email protected]

from train_model import *
from params import *
from print_save import *
import os

os.environ["CUDA_VISIBLE_DEVICES"] = GPU_INDEX

if __name__ == '__main__':
    para = [
        GPU_INDEX, DATASET, MODEL, LR, LAMDA, EMB_DIM, BATCH_SIZE,
        TEST_USER_BATCH, SAMPLE_RATE, N_EPOCH, TOP_K
    ]
    para_name = [
        'GPU_INDEX', 'DATASET', 'MODEL', 'LR', 'LAMDA', 'EMB_DIM',
        'BATCH_SIZE', 'TEST_USER_BATCH', 'SAMPLE_RATE', 'N_EPOCH', 'TOP_K'
    ]
    ## print model hyperparameters
    print_params(para_name, para)
    ## train the model
    train_model(para)