def build_model(X_train, Y_train, X_test, Y_test):
    hyperModel = RegressionHyperModel((X_train.shape[1],))

    # random search over the hyperparameter space defined by the hypermodel
    tuner_rs = RandomSearch(hyperModel,
                            objective='mse',
                            max_trials=135,
                            executions_per_trial=1,
                            directory='param_opt_checkouts',
                            project_name='GDW')
    tuner_rs.search(X_train, Y_train, validation_data=(X_test, Y_test), epochs=160)

    # keep the best model found during the search
    best_model = tuner_rs.get_best_models(num_models=1)[0]

    # metrics = ['loss', 'mse', 'mae', 'mape', 'cosine_proximity']
    # _eval = best_model.evaluate(X_test, Y_test)
    # print(_eval)
    # for i in range(len(metrics)):
    #     print(f'{metrics[i]} : {_eval[i]}')
    # history = best_model.fit(X_train, Y_train, validation_data=(X_test, Y_test), epochs=50)
    # best_model.save('./models_ANN/best_model')
    # save_model(best_model)

    tuner_rs.results_summary()
    print(load_model().summary())
    predict(best_model)
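# RegressionHyperModel is defined elsewhere in this project. The sketch below
# shows the minimal HyperModel interface that keras-tuner's RandomSearch
# expects; the layer sizes and learning rates are illustrative assumptions,
# not the project's actual search space. Note that the compiled model must
# log 'mse' for the tuner objective above to be trackable.
from tensorflow import keras
from kerastuner import HyperModel

class RegressionHyperModelSketch(HyperModel):
    def __init__(self, input_shape):
        self.input_shape = input_shape

    def build(self, hp):
        model = keras.Sequential([
            keras.layers.Dense(hp.Int('units', 32, 256, step=32),
                               activation='relu',
                               input_shape=self.input_shape),
            keras.layers.Dense(1)  # single regression output
        ])
        model.compile(optimizer=keras.optimizers.Adam(
                          hp.Choice('learning_rate', [1e-2, 1e-3, 1e-4])),
                      loss='mse',
                      metrics=['mse', 'mae'])
        return model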
def prediction(checkpoint, device, aerofoil_file, redistribute_coordinates=True, base_aerofoil=None, skiprows=0):
    # preprocess aerofoil
    aerofoil_file = Path(aerofoil_file)
    if redistribute_coordinates:
        aerofoil_redistribution(base_aerofoil, aerofoil_file, aerofoil_file.parent)

    # load model
    model, architecture, _, _ = load_model(checkpoint)

    # get coordinates
    coordinates = np.loadtxt(aerofoil_file, delimiter=" ", dtype=np.float32, skiprows=skiprows)
    y_coordinates = np.array(coordinates[:, 1], dtype=np.float32)  # inputs as ndarrays

    # convert data to tensor with correct shape
    x = torch.from_numpy(y_coordinates).view(1, architecture['num_channels'], architecture['input_size'])
    x = x.to(device)

    model.eval()  # turn off batch normalisation and dropout
    with torch.no_grad():  # don't add gradients of test set to computational graph
        ClCd, angle = model(x.float())  # predictions

    print(f'max lift-to-drag ratio = {ClCd.numpy()} at angle of attack = {angle.numpy()} degrees')
    return ClCd.numpy(), angle.numpy()
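# Hypothetical usage of prediction(); the checkpoint and aerofoil paths below
# are illustrative placeholders, not files shipped with this project.
if __name__ == '__main__':
    device = torch.device('cpu')  # .numpy() inside prediction() requires CPU tensors
    ClCd, angle = prediction('checkpoints/best_model.tar', device,
                             'aerofoils/naca0012.dat',
                             redistribute_coordinates=False)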
def predict(domain, model):
    # extract numeric features from the domain and standardise them with the
    # scaler fitted at training time
    feature_list = feature.get_features(domain)
    feature_np = np.array([feature_list]).astype(np.float64)
    stdsc = train_model.load_model('../model/stdsc.pkl')
    feature_std = stdsc.transform(feature_np)

    label = model.predict(feature_std)
    print()
    if label == [0]:
        print('result: good')
    elif label == [1]:
        print('result: bad')
    else:
        print('classify error')
    return label
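# A minimal sketch of what a pickle-based helper like train_model.load_model
# could look like (an assumption about this project's persistence layer; the
# real helper may differ):
import pickle

def load_model_sketch(path):
    with open(path, 'rb') as f:
        return pickle.load(f)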
def sentiment():
    # tweets previously collected and scored with VADER
    tweets_df = pickle.load(open(train_model.TWEETS_COLLECTED_DIR + 'tweets_vader.pickle', 'rb'))
    dt1 = datetime.datetime(2015, 10, 24, 14, 30, 0, 0, pytz.UTC)
    dt2 = datetime.datetime(2016, 5, 24, 14, 31, 0, 0, pytz.UTC)
    ticker = 'GS'
    vocab_file_path = '../Data/vocab_merged_old2.txt'
    model_path = '../Result_Data_Storage/sst_sent140_fusion/params_5epochs.pickle'

    # score the tweets with the LSTM model and build the dataframe for the
    # Granger causality analysis
    lstm_model = train_model.load_model(vocab_file_path, model_path)
    granger_df = perform_analysis.df_for_granger(tweets_df, ticker, model=lstm_model,
                                                 vocab=train_model.load_vocab(vocab_file_path))
    pickle.dump(granger_df, open('../Data/granger_df_lstm_mfst.pickle', 'wb'))
    # granger_df = pickle.load(open('../Data/granger_df_lstm.pickle', 'rb'))

    # drop rows with missing sentiment before the analysis
    granger_df = granger_df[np.logical_not(granger_df.isnull()['sentiment'])]
    print(granger_df)
    perform_analysis.granger_analysis(granger_df)
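# A minimal sketch of the Granger step that perform_analysis.granger_analysis
# presumably wraps, using statsmodels directly. The column names 'returns' and
# 'sentiment' are assumptions about granger_df's layout, not confirmed here.
from statsmodels.tsa.stattools import grangercausalitytests

def granger_sketch(granger_df, maxlag=5):
    # tests whether lagged values of the second column ('sentiment') help
    # predict the first column ('returns')
    return grangercausalitytests(granger_df[['returns', 'sentiment']].dropna(),
                                 maxlag=maxlag)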
from train_model import (stack_data, create_y, read_data, load_model, TEST_FILE)

# build the test matrix and labels, then score them with the saved model
test_data = read_data(TEST_FILE)
X_test, y_test = stack_data(test_data), create_y(test_data)

model = load_model()
predictions = model.predict(X_test)
print(predictions)
filename_dict = {
    '1': 'bagging.pkl',
    '2': 'random_forest.pkl',
    '3': 'extra_tree.pkl',
    '4': 'adaboost.pkl',
    '5': 'gradient_tree_boosting.pkl',
    '6': 'voting_classifier.pkl'
}

# prompt until the user picks a model (1-6) or quits (0)
while True:
    choice = input()
    if choice in ['1', '2', '3', '4', '5', '6']:
        break
    elif choice == '0':
        print('quit')
        sys.exit()
    else:
        print('input error')

file = '../model/' + filename_dict[choice]
if os.path.exists(file):
    model = train_model.load_model(file)
    domain = input('\nPlease input the payload that you want to detect:\n')
    predict(domain, model)
else:
    print('\nPlease train this model before using it, or use another model to predict')
def run():
    parser = argparse.ArgumentParser(description='Pascal VOC 2012 Classifier')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--test-batch-size', type=int, default=32, metavar='N',
                        help='input batch size for testing (default: 32)')
    parser.add_argument('--mode', type=str, default='A', metavar='M',
                        help='mode of model')
    parser.add_argument('--demo_mode', type=str, default='single', metavar='M',
                        help="mode of demo ('single' or 'gui')")
    parser.add_argument('--image_path', type=str, default='./test.jpg', metavar='P',
                        help='path to the input image')
    # parser.add_argument('--class_name', type=str, default='aeroplane', metavar='M',
    #                     help='Mode of demo')
    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    torch.manual_seed(args.seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    # Get transform
    _, test_transform = initialise_transforms()

    # Initialise model
    model, params = load_model()
    model = model.to(device)
    model.eval()

    model_name = 'pascalvoc_' + args.mode + '.pt'
    print('Loading model...')
    model.load_state_dict(torch.load(model_name))

    if args.demo_mode == 'single':
        # Convert jpg to tensor
        image = Image.open(args.image_path).convert('RGB')
        image_tensor = test_transform(image).unsqueeze(0).to(device)

        # Get model prediction
        pred = model(image_tensor)
        pred = torch.sigmoid(pred)
        display_prediction(pred, image)
    elif args.demo_mode == 'gui':
        class_to_index = utils.class_to_index()
        # index = class_to_index[args.class_name]

        # 2-part transform to preserve image after first_transform
        first_transform = transforms.Compose([transforms.Resize(224)])
        second_transform = transforms.Compose([
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

        # Validation set
        test_loader = torch.utils.data.DataLoader(
            VOCDataset(root, 'val', transform=test_transform),
            batch_size=args.test_batch_size, shuffle=True)

        # Get predictions on validation set
        all_predictions = []
        start = time.time()
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output = model(data)
                output = torch.sigmoid(output)
                target = target.float()
                # Precision for each class in each example:
                # ground truth acts as a mask for the predictions
                for i in range(output.shape[0]):
                    scores = target[i] * output[i]
                    all_predictions.append(scores)
        end = time.time()
        print("Time lapsed: {:.2f}s".format(end - start))
        print(all_predictions)
    else:
        raise Exception("Please enter demo_mode as 'single' or 'gui'")
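# Example invocation (the script name demo.py is a placeholder; a weights file
# such as pascalvoc_A.pt must exist in the working directory for --mode A):
#   python demo.py --mode A --demo_mode single --image_path ./test.jpg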