def main():
    """Train candidate models repeatedly, persisting whichever beats the saved best.

    Runs five training rounds; after each round the freshly trained model is
    compared against the model currently saved on disk, and the files are
    updated only when the new one wins. Finally prints the surviving best model.
    """
    # Load the practice data set.
    print("Loading practice data set.")
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_practice_dataset()

    # Show some images (optional of course : ) )
    # show_some_images(X_train_orig, X_test_orig)

    network = DeepNeuralNetwork(X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes)

    for _ in range(5):
        # Train a fresh candidate and wrap it for comparison.
        trained_params, trained_acc = network.train(num_epochs=1500, print_cost=True)
        candidate = PredictionModel(trained_params, trained_acc)

        # Load the current champion from disk.
        saved_params, saved_acc = load_model()
        champion = PredictionModel(saved_params, saved_acc)

        if candidate > champion:
            print("\n\tNew model is better... Displaying accuracies and updating files.. ")
            print(candidate)
            save_model(candidate.parameters, candidate.accuracies)
        else:
            print("Previous model is superior.")

    # Report whatever model survived on disk as the overall best.
    final_params, final_acc = load_model()
    best_model = PredictionModel(final_params, final_acc)
    print(best_model)
def main():
    """Run the GDC data pipeline: generate the expression matrix, then predict.

    Earlier pipeline stages (integrity check, JSON→CSV conversion, metadata
    generation) are currently disabled below — presumably they already ran and
    their outputs exist on disk; confirm before re-enabling.

    Returns:
        -1 on matrix-generation failure; otherwise falls through (implicit None)
        after running the prediction model.
    """
    # NOTE(review): stages 1-3 intentionally left commented out, preserved for
    # re-enabling. Do not delete without confirming the pipeline contract.
    # logger.info("Initiating the integrity check")
    # error_code = check_helper(MANIFEST_FILE, GDC_DATA_DIR)
    # if error_code != 0:
    #     logger.error("Issue with verifying integrity")
    #     return -1
    # logger.info("Initializing json file generator")
    # error_code = json_file_generator(INPUT_JSON_FILE, OUTPUT_CSV_FILE)
    # if error_code != 0:
    #     logger.error("Issue with generating csv file in stage 2")
    #     return -1
    # logger.info("Initializing meta data generator")
    # meta_object = RequestMeta(OUTPUT_CSV_FILE, OUTPUT_FILE_META_TSV, OUTPUT_CASE_META_TSV)
    # error_code = meta_object.meta_file_generate()
    # if error_code != 0:
    #     logger.error("Issue with generating meta files")
    #     return -1
    # logger.info("Successfully created meta data file")

    logger.info("Initializing matrix generator")
    matrix_generator_object = MatrixGenerator(GDC_DATA_DIR, OUTPUT_FILE_META_TSV, OUTPUT_CSV_MATRIX_FILE)
    error_code = matrix_generator_object.generator()
    if error_code != 0:
        # BUG FIX: this failure path previously logged at INFO level; every
        # other failure path in this function logs with logger.error.
        logger.error("Issue with generating matrix csv")
        return -1
    logger.info("Successfully generated matrix csv")

    logger.info("Running prediction model")
    model_object = PredictionModel(OUTPUT_CSV_MATRIX_FILE)
    model_object.run_model()
def improve_prediction_model(self, epochs=5):
    """Retrain candidate models and keep the best one on this instance.

    Trains *epochs* candidate models; whenever a candidate beats the current
    model (via ``PredictionModel.__gt__``), this instance adopts its state and
    saves it to disk.

    Args:
        epochs: number of training rounds to attempt (default 5).
            NOTE(review): also passed as ``num_epochs`` to ``train`` — a
            5-epoch training run looks suspiciously short; confirm intent.
    """
    # Load Data Set
    print("Loading data set.")
    X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_practice_dataset()

    # Show some images (optional of course : ) )
    # show_some_images(X_train_orig, X_test_orig)

    test_model = DeepNeuralNetwork(X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes)

    for _ in range(epochs):
        parameters, accuracies = test_model.train(num_epochs=epochs, print_cost=True)
        new_model = PredictionModel(parameters, accuracies)
        if new_model > self:
            print(
                "\n\tNew model is better... Displaying accuracies and updating files.. "
            )
            # BUG FIX: the original did `self = new_model`, which only rebinds
            # the local name and never updates the caller's instance. Copy the
            # winning state onto this instance instead.
            self.parameters = new_model.parameters
            self.accuracies = new_model.accuracies
            print(self)
            self.save_model()
        else:
            print("Previous model is superior or equivalent.")
    print(self)
def home():
    """Handle the home page: either sample a random text or run a prediction.

    'generate' fills the form with a random row from random_dataset.csv;
    'predict' runs the model when the text is longer than 10 characters.
    Any other case re-renders the page with no output.
    """
    form = OriginalTextForm()

    if form.generate.data:
        # Pick a random example from the bundled dataset to pre-fill the form.
        dataset = pd.read_csv("random_dataset.csv")
        row = randrange(0, len(dataset) - 1, 1)
        form.original_text.data = str(dataset.loc[row].text)
        return render_template('home.html', form=form, output=False)

    # Predict only when the submitted text is non-trivially long.
    if form.predict.data and len(str(form.original_text.data)) > 10:
        model = PredictionModel(form.original_text.data)
        return render_template('home.html', form=form, output=model.predict())

    return render_template('home.html', form=form, output=False)
def load_model(self, model="dnn_best"):
    """Load saved network weights and accuracies for *model* from disk.

    Reads the six parameter arrays (W1/b1/W2/b2/W3/b3) and the two accuracy
    scalars from ``../../models/<model>/`` (paths are relative to the current
    working directory — TODO confirm callers run from the expected directory).

    Args:
        model: subdirectory name under ``../../models/`` (default "dnn_best").

    Returns:
        A ``PredictionModel`` built from the loaded parameters and accuracies.
    """
    base = '../../models/' + model + '/'
    # Collapse the six near-identical np.load calls into one comprehension;
    # file naming follows the 'param<key>.npy' convention used by save_model.
    parameters = {
        key: np.load(base + 'param' + key + '.npy')
        for key in ('W1', 'b1', 'W2', 'b2', 'W3', 'b3')
    }
    accuracies = {
        'train_accuracy': np.load(base + 'trainaccuracy.npy'),
        'test_accuracy': np.load(base + 'testaccuracy.npy'),
    }
    return PredictionModel(parameters, accuracies)
def predict(original_text):
    """Run the prediction model on *original_text* and return the result as JSON.

    Args:
        original_text: raw article text to classify.

    Returns:
        A JSON response wrapping ``PredictionModel.predict()``'s output.
    """
    # (A long hard-coded sample article used for manual testing was removed
    # from the comments here; pass any article text in via `original_text`.)
    model = PredictionModel(original_text)
    return jsonify(model.predict())
# Top-level driver fragment: configure the dataset, build the chosen model,
# and run the optimizer.
# NOTE(review): `config`, `title`, `crop`, `dataset_folder` and `model_type`
# are defined earlier in the file (not visible in this chunk) — confirm their
# origins before refactoring.
config['title'] = title
# `crop` arrives as a string; +1 presumably converts a 0-based index to a
# 1-based count — TODO confirm against Dataset.load_multivariate.
crop = int(crop) + 1
dataset = Dataset()
dataset.load_multivariate(dataset_folder, crop)
#dataset.load_ucr_univariate_data(dataset_folder=dataset_folder)

# Record dataset dimensions so the model can size its layers from config.
config['dataset:length'] = dataset.series_length
config['dataset:num_channels'] = dataset.num_channels
config['dataset:num_classes'] = dataset.num_classes

# create the model
model = None
# Encoder/aggregation choices come straight from the command line.
config['encoder_type'] = sys.argv[2]
config['aggregation_type'] = sys.argv[3]

# create the prediction model
if model_type == 'standard':
    model = PredictionModel(config)
elif model_type == 'supervised':
    model = CNNAndRNN(config)
# NOTE(review): any other `model_type` leaves `model` as None and the next
# call raises AttributeError — consider validating `model_type` upstream.

model.create_prediction_model()
print("This model has", model.num_model_parameters(), "parameters")

opt = Optimizer(config=config, dataset=dataset, model=model)
opt.optimize()
def __init__(self, config):
    """Initialize the supervised aggregation model.

    Args:
        config: configuration object/dict forwarded to the base class.
    """
    # Idiom fix: use super() rather than naming the base class explicitly,
    # so the call cooperates with Python's MRO and survives base renames.
    super().__init__(config)
    self.name = 'SupervisedAggregationModel'