def test_build(self):
    net = Builder()
    model = net.build(width=ImageSize.WIDTH, height=ImageSize.HEIGHT,
                      depth=3, classes=2)
    assert 11 == len(model.layers)
def build_polluter(win_size: int):
    builder = ModelBuilder(win_size)
    model = builder \
        .build('fc', 4, [25, 50, 100, 200]) \
        .build('fc', 4, [200, 100, 50, 25]) \
        .build('fc', 1, [win_size], activation_list=['none']) \
        .to_model()
    return model
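# A minimal usage sketch for build_polluter, assuming to_model() returns a
# Keras-style model exposing predict(); the dummy batch below is purely
# illustrative, not from the original code.
#
#   import numpy as np
#
#   model = build_polluter(win_size=32)
#   dummy_batch = np.random.rand(8, 32)   # 8 samples, window width 32
#   outputs = model.predict(dummy_batch)  # final layer width is win_size,
#                                         # so outputs should be (8, 32)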
def generate_model(c1, c2):
    # Fetch, prepare, train, and persist a model for the (c1, c2) pair.
    rh = RequestHandler(c1, c2)
    pd = PrepareData(c2)
    mb = ModelBuilder(pd.prepare(rh.get_data()))
    mb.train_model()
    mb.save_model("model_{}_{}".format(c1, c2))
    logging.info("Model trained and saved for {}/{}".format(c1, c2))
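# A possible driver for generate_model (the argument pairs shown are
# hypothetical placeholders, not from the original code):
#
#   for c1, c2 in [("BTC", "USD"), ("ETH", "USD")]:
#       generate_model(c1, c2)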
def build_models(self, grid_search=False):
    train_X, test_X = self.scale_features()
    all_predictions = {}
    for classifier in self.eval_classifiers:
        clf = self.eval_classifiers[classifier]
        predictor = ModelBuilder(train_X, self.train_y, clf)
        # Alternative training strategies, kept for reference:
        # trained_clf = predictor.train_classifier_stratified_cv_grid_search(
        #     classifier_name=classifier, classifier=clf,
        #     grid_search=grid_search,
        #     params_grid=self.eval_classifiers_params_grid[classifier])
        # trained_clf = predictor.train_classifier_stratified_cv(
        #     classifier_name=classifier, classifier=clf, cv_folds=10)
        trained_clf = predictor.train_classifier_nested_stratified_cv_grid_search(
            classifier_name=classifier,
            classifier=clf,
            grid_search=grid_search,
            params_grid=self.eval_classifiers_params_grid[classifier])

        # Predict on the test set
        test_predictions = predictor.predict_with_classifier(
            test_X=test_X, classifier_name=classifier, classifier=trained_clf)
        all_predictions[classifier + '_pred'] = test_predictions

        # Predict on the train set to report in-sample error
        train_predictions = predictor.predict_with_classifier(
            test_X=train_X, classifier_name=classifier, classifier=trained_clf)
        curr_model_performance = self.evaluate_performance(
            self.train_y, train_predictions)
        print("MSE of {} alone on train set: {}".format(
            classifier, curr_model_performance))
        print('-' * 68)

    final_prediction = self.get_ensemble_average_vote(all_predictions)
    return all_predictions, final_prediction
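# A minimal sketch of the averaging step that get_ensemble_average_vote
# presumably performs (hypothetical; the real method may weight or vote
# differently): element-wise mean of every classifier's predictions.
import numpy as np

def ensemble_average(all_predictions):
    # Stack each classifier's prediction vector, then average across
    # classifiers for every sample.
    stacked = np.stack(list(all_predictions.values()), axis=0)
    return stacked.mean(axis=0)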
def _build_nn(win_sz: int):
    """Build the fully connected classifier for a given window size.

    :param win_sz: width of the input window
    :return: the assembled model
    """
    builder = ModelBuilder(win_sz)
    model = builder \
        .build('fc', 2, [50, 50]) \
        .build('fc', 1, [4], activation_list=['none']) \
        .to_model()
    return model
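# A minimal prediction sketch for _build_nn, assuming a Keras-style model;
# the 4-unit linear head suggests four scores per window, so the argmax
# reading below is only one plausible interpretation.
#
#   import numpy as np
#
#   model = _build_nn(win_sz=50)
#   window = np.random.rand(1, 50)    # one illustrative input window
#   scores = model.predict(window)    # shape (1, 4)
#   best = int(np.argmax(scores, axis=1)[0])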
def generateVideoButton(self):
    # Debug dump of the loaded shapes:
    # for i in range(len(self.shapes)):
    #     print(self.shapes[i].name)
    #     for j in range(len(self.shapes[i].faces)):
    #         print(self.shapes[i].faces[j].faceOrientation, ":",
    #               self.shapes[i].faces[j].facePoints)
    mb = ModelBuilder()
    Models = []
    for i in self.shapes:
        each_model = mb.BuildModel(i)
        # Debug dump of each polygon's vertex and texel coordinates:
        # for j in each_model:
        #     print("Vertex is:")
        #     for k in j.Vertex:
        #         print(k.x, k.y, k.z)
        #     print("Texel is:")
        #     for n in j.Texel:
        #         print(n.u, n.v)
        Models.append(each_model)
    print("Models list size:", len(Models))

    img = cv2.imread("project.png", cv2.IMREAD_COLOR)
    texture = Texture(img)
    points = []
    for i in range(len(Models)):
        pointsOfEachModel = []
        if i < 5:
            # single-model buildings 4, 11, 12, 13 and the ground
            fileIndex = i
            for j in range(len(Models[i])):  # j is surfaces of each model
                pointsOfEachFace = texture.putTexture(Models[i][j])
                pointsOfEachModel.extend(pointsOfEachFace)
        elif i == 5:
            # models 5-6 form compound building 10
            fileIndex = 5
            for j in range(5, 7):
                for k in range(len(Models[j])):
                    pointsOfEachFace = texture.putTexture(Models[j][k])
                    pointsOfEachModel.extend(pointsOfEachFace)
        elif i == 7:
            # models 7-12 form compound building 9
            fileIndex = 6
            for j in range(7, 13):
                for k in range(len(Models[j])):
                    pointsOfEachFace = texture.putTexture(Models[j][k])
                    pointsOfEachModel.extend(pointsOfEachFace)
        elif i >= 13 and (i - 13) % 2 == 0:
            # model pairs form compound buildings 1-8
            multiple = (i - 13) // 2
            fileIndex = 7 + multiple
            for j in range(i, i + 2):
                for k in range(len(Models[j])):
                    pointsOfEachFace = texture.putTexture(Models[j][k])
                    pointsOfEachModel.extend(pointsOfEachFace)
        else:
            continue
        points = pointsOfEachModel
        with open("Models/model_" + str(fileIndex) + ".dat", "w+") as fileRGB:
            for k in range(len(pointsOfEachModel)):
                point = "{0},{1},{2},{r},{g},{b}\n".format(
                    points[k].x, points[k].y, points[k].z,
                    r=points[k].r, g=points[k].g, b=points[k].b)
                fileRGB.write(point)
        print("Model " + str(fileIndex) + ": " +
              str(len(pointsOfEachModel)) + " points generated")
    print("----------UI Phase Finished----------")
    print("All models have been generated, please use main.py to generate frames of video")
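# A minimal sketch of reading one generated model_<N>.dat file back in
# (hypothetical helper; main.py presumably does something similar). Each
# line holds x,y,z,r,g,b exactly as written by the loop above.
def load_model_points(path):
    points = []
    with open(path) as f:
        for line in f:
            x, y, z, r, g, b = line.strip().split(',')
            points.append((float(x), float(y), float(z),
                           float(r), float(g), float(b)))
    return points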
from ModelBuilder import ModelBuilder
from ModelEvaluator import ModelEvaluator
from DataTransformer import multi_csv_to_dataset
from ModelLoader import ModelLoader

dataset = multi_csv_to_dataset([
    'test_data/SHOP_daily.csv',
    # 'test_data/TD_daily.csv',
    # 'test_data/ENB_daily.csv',
    # 'test_data/BA_daily.csv',
    # 'test_data/TSLA_daily.csv'
])

model_loader = ModelLoader()

# To retrain from scratch and save instead of loading the stored model:
# test_data = ModelBuilder().build_model(dataset, 150)
# model_loader.save_model(test_data.model, 'multistock-2020-04-09')

test_data = ModelBuilder().split_test_data(dataset, 0.7)
test_data.model = model_loader.load_model('multistock-2020-04-09.h5')

evaluator = ModelEvaluator()
evaluator.analyze(test_data)
evaluator.plot(test_data)
                                                  random_state=42)

# convert the labels from integers to vectors
trainY = to_categorical(trainY, num_classes=2)
testY = to_categorical(testY, num_classes=2)

# construct the image generator for data augmentation
aug = ImageDataGenerator(rotation_range=30, width_shift_range=0.1,
                         height_shift_range=0.1, shear_range=0.2,
                         zoom_range=0.2, horizontal_flip=True,
                         fill_mode="nearest")

net = ModelBuilder()

# initialize the model
print("[INFO] compiling model...")
model = net.build(width=ImageSize.WIDTH, height=ImageSize.HEIGHT,
                  depth=3, classes=2)
opt = Adam(lr=MetaParams.INIT_LR,
           decay=MetaParams.INIT_LR / MetaParams.EPOCHS)
model.compile(loss="binary_crossentropy", optimizer=opt,
              metrics=["accuracy"])

# train the network
print("[INFO] training network...")
H = model.fit_generator(aug.flow(trainX, trainY, batch_size=MetaParams.BS),
                        validation_data=(testX, testY),
                        steps_per_epoch=len(trainX) // MetaParams.BS,
from RequestHandler import RequestHandler
from PrepareData import PrepareData
from ModelBuilder import ModelBuilder
import argparse


def get_args():
    parser = argparse.ArgumentParser(
        description="Preparing data and building model")
    parser.add_argument('-f', '--filename',
                        help="Filename for saved keras model - in /models/",
                        type=str)
    parser.add_argument('-b', '--batch_size',
                        help="Batch size for training model", type=int)
    parser.add_argument('-e', '--epochs',
                        help="Number of epochs for training model", type=int)
    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()
    pd = PrepareData(RequestHandler().get_data())
    model_builder = ModelBuilder(pd.prepare())
    model_builder.train_model(args.batch_size, args.epochs)
    model_builder.save_model(args.filename)
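# Example invocation (script name and argument values are hypothetical):
#
#   python prepare_and_train.py -f my_model.h5 -b 32 -e 10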
global len_comm, len_pr_comm
len_comm, en_comm_len = Utils.encode_docs(tok, input_politics, "comment")
len_pr_comm, en_pr_comm_len = Utils.encode_docs(tok, input_politics,
                                                "parent_comment")
embedding_matrix = Utils.create_embeddings(vocab_size, tok)

if choice == "train":
    y = np.array(input_politics["label"])
    y = to_categorical(y, num_classes=None)
    x = np.concatenate((en_pr_comm_len, en_comm_len), axis=1)
    opt = "adam"
    loss = "binary_crossentropy"
    path = opt + "-" + loss
    builder = ModelBuilder(vocab_size=vocab_size,
                           embedding_matrix=embedding_matrix)
    model = builder.multi_input_model(len_pr_comm, len_comm,
                                      optimizer=opt, loss=loss)
    train_model(model, x, y, 20)

if choice == "test":
    politics_test = init_filtered_data(
        "/home/shariq/MSc/Research/dataset/test-politics.csv")
    y_ = np.array(politics_test["label"])
    y1 = to_categorical(y_, num_classes=None)
    en_test_comm = Utils.encode_test_docs(tok, politics_test, 'comment',
                                          len_comm)
    en_test_pr_comm = Utils.encode_test_docs(tok, politics_test,
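# A sketch of the two-branch architecture a multi_input_model like the one
# above typically assembles (hypothetical; the real ModelBuilder may wire
# this differently): one embedding branch per input, concatenated into a
# shared classifier head.
from tensorflow.keras.layers import Input, Embedding, LSTM, Dense, concatenate
from tensorflow.keras.models import Model

def sketch_multi_input_model(vocab_size, embedding_matrix,
                             len_parent, len_comment):
    in_parent = Input(shape=(len_parent,))
    in_comment = Input(shape=(len_comment,))
    # shared, frozen embedding initialized from the pre-built matrix
    embed = Embedding(vocab_size, embedding_matrix.shape[1],
                      weights=[embedding_matrix], trainable=False)
    branch_p = LSTM(64)(embed(in_parent))
    branch_c = LSTM(64)(embed(in_comment))
    merged = concatenate([branch_p, branch_c])
    out = Dense(2, activation='softmax')(merged)
    model = Model([in_parent, in_comment], out)
    model.compile(optimizer='adam', loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model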
if __name__ == "__main__":
    stan_file_path = sys.argv[1]
    results_file = sys.argv[2]
    data_file = sys.argv[3]
    try:
        output_file = sys.argv[4]
    except IndexError:
        output_file = "anyhow_this_file_doesnt_exist"

    try:
        # template file
        templatefile = sys.argv[5]
        print("Computing Graph features...")
        modelBuilder = ModelBuilder(templatefile)
        graph_features = modelBuilder.get_features()
        print("Done Graph features...")
    except Exception:
        import traceback
        traceback.print_exc()
        graph_features = None

    try:
        pyrofile = sys.argv[6]
        print(pyrofile)
        print("Computing Pyro features...")
        pyrofeatureparser = PyroFeatureParser()
        pyro_features = pyrofeatureparser.get_pyro_features(pyrofile)
        print("Done Pyro features...")
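# Expected invocation shape (positional arguments; the file names shown
# are hypothetical):
#
#   python script.py model.stan results.csv data.csv \
#       [output_file] [template_file] [pyro_file]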
########## Load the model files into a dict ##########
models = {}
for l in file:
    line = l.rstrip()
    if verbose:
        print(line)
    if not line or line[0] == '#':
        # skip blank lines and comments
        continue
    elif line[0] == 'f':
        # 'f,<filename>' starts a new model
        filename = line.split(',')[1]
        if verbose:
            print("models[%d]" % len(models), end=" ")
        models[len(models)] = ModelBuilder(filename, verbose)
    elif line[0] == 'i':
        # 'i,<six values>' sets the initial state of the latest model
        vals = line.split(',')
        if len(vals) < 7:
            print("Unknown line format %s" % line)
            continue
        models[len(models) - 1].set_initial_values(vals[1:7])
file.close()

############################## Set up the window ##############################
window = pyglet.window.Window(width=1280, height=720, resizable=True)
keys = key.KeyStateHandler()
window.push_handlers(keys)
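# Example of the model-list file format the loader above expects (the
# values shown are hypothetical): an 'f' line names a model file, and an
# 'i' line sets six initial values on the most recently added model.
#
#   # comment lines are skipped
#   f,models/teapot.obj
#   i,0,0,0,0,0,0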
from FeatureVectorBuilder import FeatureVector
from ModelBuilder import ModelBuilder

fv = FeatureVector()

# reading our labeled training data from text file
trainX, trainY = fv.readData('data/train.txt')

# now we build and test our model
mb = ModelBuilder()
mb.svm(trainX, trainY)
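# A minimal sketch of what an svm() helper like the one above commonly
# wraps, assuming scikit-learn (hypothetical; ModelBuilder.svm may differ):
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score

def svm_sketch(trainX, trainY):
    clf = SVC(kernel='rbf')
    # report 5-fold cross-validated accuracy, then fit on all the data
    scores = cross_val_score(clf, trainX, trainY, cv=5)
    print("CV accuracy: %.3f" % scores.mean())
    clf.fit(trainX, trainY)
    return clf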