def main():
    model_train(test=False)
    model = model_load()
    print("model training complete.")
def train(): """ basic predict function for the API the 'mode' flag provides the ability to toggle between a test version and a production verion of training """ ## check for request data if not request.json: print("ERROR: API (train): did not receive request data") return jsonify(False) ## set the test flag test = False if 'mode' in request.json and request.json['mode'] == 'test': test = True print("... training model") data_dir = os.path.join("data", "cs-train") model_train(data_dir, test=False) print("... training complete") return (jsonify(True))
def train(): """ basic predict function for the API the 'mode' give you the ability to toggle between a test version and a production verion of training """ regressor = request.args.get('regressor') if regressor is None: print( "WARNING API (train): received request, but no regressor specified, assuming 'randomforest'" ) regressor = "randomforest" print("... training model") data_dir = os.path.join(THIS_DIR, "cs-train") try: model_train(data_dir, test=True, regressor=regressor) print("... training complete") # reload models and data after re-train print("... reloading models in cache") global_data, global_models = model_load(training=False) return (jsonify(True)) except Exception as e: print("ERROR API (train): model_train returned: {}".format(str(e))) return jsonify([]), 400
def test_01_train(self): """ test the train functionality """ ## train the model model_train(data_dir) self.assertTrue(os.path.exists(os.path.join("models")))
def main():

    # train the model
    model_train(test=False)

    # load the model
    model = model_load()

    print("model training complete.")
def main():

    # train the model
    data_dir = os.path.join("..", "cs-train")
    model_train(data_dir)

    # load the model
    model = model_load()

    print("model training complete.")
def main(data_dir):

    ## train the model
    model_train(data_dir, test=False)

    ## load the model
    model = model_load()

    print("model training complete.")
def test_01_train(self): """ test the train functionality """ # train the model model_train(data_dir, prefix='test', test=True) self.assertTrue( os.path.exists(os.path.join(model_dir, "test-all-0_1.joblib")))
def test_01_train(self): """ ensure log file is created """ log_file = os.path.join(log_dir, "train-test.log") model_train(data_dir, prefix='test', test=True) self.assertTrue(os.path.exists(log_file))
def main():

    ## train the model
    model_train()

    ## load the model
    model = model_load()

    print("model training complete.")
def main(): ## train the model data_dir = os.path.join(".", "data", "cs-train") model_train(data_dir, test=False) ## load the model all_data, all_models = model_load() print("model training complete.")
def main():
    data_dir = os.path.join(DATA_DIR, "cs-train")

    ## train the model
    model_train(data_dir, test=False)

    ## load the model
    model = model_load()

    print("model training complete.")
def test_train_file_creation(self):
    data_dir = join('.', 'data')
    work_dir = join(data_dir, 'work-data')
    models_dir = join('.', 'models')
    inpfile = join(work_dir, 'train-data-cleaned.csv')

    force_data_load = True
    if exists(inpfile):
        force_data_load = False

    model_train(data_dir=data_dir, test=True, model_dir=models_dir,
                force_data_load=force_data_load)

    outfile = join(work_dir, 'test-all-0_1')
    self.assertTrue(exists(outfile))
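The bare join and exists calls in this test (and in test_predict_result_is_numeric further down) imply imports along these lines at the top of the test module:

from os.path import join, exists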
def main(): ## train the model print("TRAINING MODELS") data_dir = os.path.join(".", "data", "cs-train") model_train(data_dir, test=True) ## load the model print("LOADING MODELS") all_data, all_models = model_load() print("... models loaded: ", ",".join(all_models.keys())) print("model training complete.")
def main():

    ## train the model
    model_train(data_dir=DATA_DIR, prefix='sl', test=False)

    ## load the model
    all_data, all_models = model_load(country='all', prefix='sl',
                                      data_dir=DATA_DIR, training=False)
    print("... models loaded: ", ",".join(all_models.keys()))

    print("model training complete.")
def train(): if not request.json: print("ERROR: API (train): did not receive request data") return jsonify(False) test = False if 'mode' in request.json and request.json['mode'] == 'test': test = True query = request.json['query'] print("... training model") model_train(data_dir=query,test=test) print("... training complete") return(jsonify(True))
def train(): """ basic predict function for the API the 'mode' flag provides the ability to toggle between a test version and a production version of training """ # check for request data if not request.json: print("ERROR: API (train): did not receive request data") return jsonify(False) if 'mode' not in request.json: print("ERROR API (predict): received request, but no 'mode' found " "within") return jsonify(False) # set the test flag test = False if 'mode' in request.json and request.json['mode'] == 'test': test = True print("... training model") model = model_train(test=test) print("... training complete") return (jsonify(True))
def train(): """ basic predict function for the API """ print("... training model") model = model_train() print("... training complete") return (jsonify(True))
def trainModel():
    response = {'result': -1}
    try:
        test = False
        env = request.args.get('env')
        if env == 'test':
            test = True
        model_train(data_dir, test=test)
        response['result'] = 0
    except Exception:
        response['error'] = 'system error: ' + str(sys.exc_info()[1])

    json_object = json.dumps(response, indent=4)
    return Response(str(json_object), mimetype="application/json")
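Unlike the JSON-body routes above, this one reads its flag from the query string; a hedged invocation sketch, assuming the route accepts GET and is mounted at /train on localhost:8080 (method, host, port, and route are assumptions):

import requests

## hypothetical URL; adjust to the actual deployment
r = requests.get('http://localhost:8080/train', params={'env': 'test'})
print(r.json())  ## {'result': 0} on success, -1 plus an 'error' key otherwise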
def test_predict_result_is_numeric(self):
    data_dir = join('.', 'data')
    models_dir = join('.', 'models')
    work_dir = join(data_dir, 'work-data')
    inpfile = join(work_dir, 'train-data-cleaned.csv')

    force_data_load = True
    if exists(inpfile):
        force_data_load = False

    ukmodelfile = join(models_dir,
                       'test-united_kingdom-' + MODEL_VERSION + '.joblib')
    if not exists(ukmodelfile):
        model_train(data_dir=data_dir, model_dir=models_dir, test=True,
                    force_data_load=force_data_load)

    pred = model_predict('united_kingdom', '2018', '1', '1',
                         data_dir=data_dir, model_dir=models_dir, test=True)
    # print(pred)
    self.assertTrue(pred['y_pred'] > 0)
def get_train_op_old():
    dataset = DataGenerator(
        sample_range=config.train_samples,  # we have 236 samples
        shuffle_samples=True,
        max_patches_per_sample=config.max_patches_per_sample
    ).make_dataset(split_lhs_rhs=True)

    lhs, lhs_label, rhs, rhs_label = dataset.make_one_shot_iterator().get_next()
    ops = model_train(lhs, lhs_label, rhs, rhs_label,
                      params={'n_candidates': tf.constant(config.n_candidates)})
    return ops
def train(): if not request.json: print("No request data") return jsonify(False) test = False if 'mode' in request.json and request.json['mode'] == 'test': test = True print("Training in progress..") data_dir = os.path.join(".", "data", "cs-train") print(data_dir) model = model_train(data_dir, test=test) print("Training is completed!") return (jsonify(True))
def train(): """ basic predict function for the API """ if not request.json: print("ERROR: API (train): did not receive request data") return jsonify(False) print("... training model") model = model_train() print("... training complete") return(jsonify(True))
def get_train_op():
    dg = DataGenerator(
        sample_range=config.train_samples,
        shuffle_samples=True,
        max_patches_per_sample=config.max_patches_per_sample
    )
    ds = dg.make_dataset(split_lhs_rhs=False)

    label_weights = dg.get_label_weights_from_dumped()
    print("Label weights used: ", label_weights)
    label_weights = tf.constant(label_weights, dtype=tf.float32)

    X, Y = ds.make_one_shot_iterator().get_next()
    ops = model_train(X, Y, params={
        'n_candidates': tf.constant(config.n_candidates),
        'label_weights': label_weights
    })
    return ops
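Both get_train_op variants build TensorFlow 1.x graph ops rather than running anything; a minimal sketch of how the result might be consumed, assuming model_train returns a dict with 'train_op' and 'loss' entries (the key names and step counts are assumptions):

import tensorflow as tf

ops = get_train_op()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # run one optimization step and fetch the current loss
        _, loss = sess.run([ops['train_op'], ops['loss']])
        if step % 100 == 0:
            print("step {}: loss {:.4f}".format(step, loss))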
def train():

    ## check for request data
    if not request.json:
        print("ERROR: API (train): did not receive request data")
        return jsonify(False)

    ## set the test flag
    test = False
    if 'mode' in request.json and request.json['mode'] == 'test':
        test = True

    print("... training model")
    data_dir = os.path.join("..", "capstone-w", "cs-train")
    model = model_train(data_dir, test=test)
    print("... training complete")
    return jsonify(True)
def train(): """ basic predict function for the API the 'mode' give you the ability to toggle between a test version and a production verion of training """ if not request.json: print("ERROR: API (train): did not receive request data") return jsonify(False) if 'mode' not in request.json: print("ERROR API (train): received request, but no 'mode' found within") return jsonify(False) print("... training model") model = model_train(mode=request.json['mode']) print("... training complete") return(jsonify(True))
# program that trains the model with user-supplied C and gamma arguments
import model
import pickle

# load the dataset
with open('data_int.txt', 'rb') as f:
    data = pickle.load(f)

data_row_size = len(data[0][0])

C = 1.0
gamma = 1.0
try:
    C = float(input("Enter a value for C\n"))
    gamma = float(input("Enter a value for gamma\n"))
# fall back to the defaults on bad input
except ValueError as e:
    print(e)
    print('run default C, gamma (10000.0, 1.0)')
    C = 10000.0
    gamma = 1.0

# pass the dataset, C, and gamma to the model; the final False marks this as
# not the special-character model, so it is saved to the model_save folder
model.model_train(data, data_row_size, C, gamma, False)
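A hedged sketch of what the model.model_train helper called above might look like, assuming a scikit-learn RBF-kernel SVC and joblib persistence; the signature mirrors the call sites here, but the body, data layout, and file naming are all assumptions:

import os
from joblib import dump
from sklearn.svm import SVC

def model_train(data, data_row_size, C, gamma, is_special):
    """Fit an RBF-kernel SVC with the given C/gamma and persist it to disk."""
    X, y = data[0], data[1]  # assumed layout: (feature matrix, labels)
    clf = SVC(C=C, gamma=gamma, kernel='rbf')
    clf.fit(X, y)
    save_dir = 'model_save_special' if is_special else 'model_save'
    os.makedirs(save_dir, exist_ok=True)
    dump(clf, os.path.join(save_dir, 'svc_C{}_g{}.joblib'.format(C, gamma)))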
    'CS101 : Nacelle ambient temp. 2',
    'CS101 : Nacelle temp.',
    'CS101 : Nacelle cabinet temp.',
    'CS101 : Main carrier temp.',
    'CS101 : Rectifier cabinet temp.',
    'CS101 : Yaw inverter cabinet temp.',
    'CS101 : Fan inverter cabinet temp.',
    'CS101 : Ambient temp.',
    'CS101 : Tower temp.',
    'CS101 : Control cabinet temp.',
    'CS101 : Transformer temp.'
]

# features are selected from the no-fault dataset
output_nfs = feature_selection(scada_data, output_nf, selected_features)
# features are selected from the faulty dataset
output_ffs = feature_selection(scada_data, output_faults, selected_features)

##### Convert to record array #####
output_nfs_rec_array = output_nfs.to_records(index=False)
output_ffs_rec_array = output_ffs.to_records(index=False)

##### Preparation for splitting #####
final_data_set = preparation_for_splitting(output_nfs_rec_array, output_ffs,
                                           output_ffs_rec_array)
X_train, y_train, X_test, y_test = split(final_data_set, 'all faults',
                                         'balanced')

##### Training a model #####
# first argument is a string: 'Decision Tree' or 'SVM'
y_pred, best_param = model_train('SVM', X_train, y_train, X_test, y_test)

labels = ['no-fault', 'fault']
clfreport = classification_report(y_test, y_pred, target_names=labels)
cm = confusion_matrix(y_test, y_pred)
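classification_report and confusion_matrix come from scikit-learn, so the snippet assumes an import along these lines:

from sklearn.metrics import classification_report, confusion_matrix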
try:
    C_min = float(input("Enter the minimum value for C\n"))
    C_max = float(input("Enter the maximum value for C\n"))
    C_interval = float(input("Enter the (exponential) step for C\n"))
    gamma_min = float(input("Enter the minimum value for gamma\n"))
    gamma_max = float(input("Enter the maximum value for gamma\n"))
    gamma_interval = float(input("Enter the (exponential) step for gamma\n"))
# fall back to the defaults on bad input
except ValueError as e:
    print(e)
    print('run default')
    C_min = 1
    C_max = 10
    C_interval = 10
    gamma_min = 1
    gamma_max = 10
    gamma_interval = 10

# grid-search over C and gamma, multiplying by the step each iteration
C = C_min
while C <= C_max:
    gamma = gamma_min
    while gamma <= gamma_max:
        print("C :", C, "Gamma :", gamma)
        # pass the dataset, C, and gamma to the model; the final True marks
        # this as the special-character model, so it is saved to the
        # model_save_special folder
        model.model_train(data, data_row_size, C, gamma, True)
        gamma *= gamma_interval
    C *= C_interval
def train():
    text = request.form['text']
    model_train(text)
    print("Model training completed!")
    return jsonify("Model training completed")
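This last route reads form data rather than JSON; a hedged client sketch, assuming it is mounted at /train on localhost:8080 (URL and payload text are assumptions; the 'text' field name matches the route above):

import requests

## hypothetical URL; adjust to the actual deployment
r = requests.post('http://localhost:8080/train',
                  data={'text': 'sample training text'})
print(r.json())  ## "Model training completed"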