def __init__(self):
    n_cam = self.countCameras()
    print('#webcam={}'.format(n_cam))
    # initialize video stream
    self.video_stream = cv2.VideoCapture(n_cam - 1)
    # FIXME: use a more accurate face detector
    self.face_detector = cv2.CascadeClassifier(VIDEO_PREDICTOR.face_detection_classifier)
    self.shape_predictor = None
    if NETWORK.use_landmarks:
        self.shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)
    self.model = load_model()
    self.house = None
    self.PUBLIC_OPINIONS = dict(zip(VIDEO_PREDICTOR.emotions, VIDEO_PREDICTOR.emotions))
    # color mode: BGR
    self.OPINION_COLORS = dict(zip(VIDEO_PREDICTOR.emotions,
                                   [(0, 0, 255), (0, 173, 255), (255, 0, 77),
                                    (255, 0, 213), (0, 255, 26)]))
    self.pub_op = VIDEO_PREDICTOR.emotions[-1]
    self.is_camera_working = True
    self.is_exit = False
    self.opinions = dict(zip(VIDEO_PREDICTOR.emotions, np.zeros(5, dtype=np.int32)))
    # Chinese display strings, roughly: 'public outrage', 'joyous harmony',
    # 'widespread grief', 'stunned silence', 'bored indifference'
    self.CHINESE_PUBLIC_OPINIONS = dict(zip(VIDEO_PREDICTOR.emotions,
                                            [u'群情激愤|>_<|', u'其乐融融 (^o^)',
                                             u'哀鸿遍野 /T_T\\', u'瞠目结舌)O.O(',
                                             u'索然无味...']))
    # four-channel (BGR plus alpha) variants of the colors above
    self.CHINESE_OPINION_COLORS = dict(zip(VIDEO_PREDICTOR.emotions,
                                           [(0, 0, 255, 0), (0, 173, 255, 0),
                                            (255, 0, 77, 0), (255, 0, 255, 0),
                                            (0, 255, 26, 0)]))
    self.frame = None
    # the number of detected faces serves as the online-user count
    self.numUser = 0
    self.numFace = 0
def __init__(self):
    # initialize video stream
    self.video_stream = cv2.VideoCapture(VIDEO_PREDICTOR.camera_source)
    self.face_detector = cv2.CascadeClassifier(VIDEO_PREDICTOR.face_detection_classifier)
    self.shape_predictor = None
    if NETWORK.use_landmarks:
        self.shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)
    self.model = load_model()
    self.last_predicted_time = 0
    self.last_predicted_confidence = 0
    self.last_predicted_emotion = ""
    try:
        if VIDEO_PREDICTOR.send_by_osc_socket:
            osc_socket = create_osc_socket()
        if VIDEO_PREDICTOR.send_by_socket:
            socket = create_socket()
    except Exception as e:
        print("Error while creating socket: {}".format(e))
def predict():
    # initialize the data dictionary that will be returned from the view
    results = {"success": False}
    model_path = 'dogbreeds_model_architecture.json'
    weights_path = 'dogbreeds_model_weights.h5'
    class_path = 'classes.txt'
    model = load_model(model_path, weights_path)
    # ensure an image was properly uploaded to our endpoint
    if flask.request.method == "POST":
        if flask.request.files.get("image"):
            # read the image in PIL format
            image = flask.request.files["image"].read()
            image = Image.open(io.BytesIO(image))
            # classify the input image and then initialize the list
            # of predictions to return to the client
            preds = make_predictions(image, model)
            preds_dec = decode_prediction(preds, class_path)
            # indicate that the request was a success
            predictions = pd.Series(preds_dec).sort_values(ascending=False)
            results["predictions"] = predictions.to_json()
            results["success"] = True
    # return the data dictionary as a JSON response
    return flask.jsonify(results)
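# A minimal client-side sketch for the endpoint above. The URL and port are
# assumptions; adjust to wherever the Flask app registers this view.
#
# import requests
# with open('dog.jpg', 'rb') as f:
#     resp = requests.post('http://localhost:5000/predict', files={'image': f})
# print(resp.json())  # e.g. {"success": true, "predictions": "{...}"}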
def load_classifier(checkpoint_filename=DEFAULT_CHECKPOINT_FILENAME):
    '''
    Load the classifier. Train the network if needed.

    Arguments:
        checkpoint_filename (str): checkpoint filename for loading the model

    Returns:
        model (object): model loaded from checkpoint
        category_label_to_name (dict): dictionary for mapping category label to name
    '''
    print('Check if checkpoint model present\n\tCheckpoint: {}'.format(checkpoint_filename))
    if not os.path.isfile(checkpoint_filename):
        print('Not present. Training the model')
        classifier.train.train(DEFAULT_DATA_DIRECTORY, DEFAULT_MODEL_DIRECTORY,
                               DEFAULT_NETWORK, DEFAULT_LEARNING_RATE,
                               DEFAULT_HIDDEN_UNITS, DEFAULT_EPOCHS, DEFAULT_GPU)
    else:
        print('Ok')
    model, category_label_to_name = predict.load_model(
        DEFAULT_FILEPATH_JSON_CATEGORY, checkpoint_filename)
    return model, category_label_to_name
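# Example usage (sketch): load, or train and then load, with the default checkpoint.
#
# model, category_label_to_name = load_classifier()
# print('{} categories'.format(len(category_label_to_name)))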
def main():
    crnn = load_model(MODEL_PATH)
    root = DIR_NAME
    images = get_image_paths(root)
    random.shuffle(images)
    rec = []
    ts_score = 0
    crnn_score = 0
    for n, image in enumerate(images, 1):
        result = eval_image(image, crnn)
        ts_score += similarity_score(result['true_text'], result['tesseract'])
        crnn_score += similarity_score(result['true_text'], result['crnn'])
        rec.append({'image': image,
                    'true_text': result['true_text'],
                    'tesseract': result['tesseract'],
                    'crnn': result['crnn']})
        print(n, image)
        print(result)
        # running mean similarity: Tesseract vs. CRNN
        print(ts_score / n, crnn_score / n)
        print()
        # time.sleep(2)
    rec_df = pd.DataFrame(rec)
    rec_df.to_csv('records_10.csv')
def __init__(self):
    n_cam = self.countCameras()
    print('#webcam={}'.format(n_cam))
    # initialize video stream
    self.video_stream = cv2.VideoCapture(n_cam - 1)
    self.face_detector = cv2.CascadeClassifier(VIDEO_PREDICTOR.face_detection_classifier)
    self.shape_predictor = None
    if NETWORK.use_landmarks:
        self.shape_predictor = dlib.shape_predictor(DATASET.shape_predictor_path)
    self.model = load_model()
    self.house = None
    # self.PUBLIC_OPINIONS = dict(zip(VIDEO_PREDICTOR.emotions, ['群情激愤', '其乐融融', '哀鸿遍野', '瞠目结舌', '索然无味']))
    self.PUBLIC_OPINIONS = dict(zip(VIDEO_PREDICTOR.emotions, VIDEO_PREDICTOR.emotions))
    self.OPINION_COLORS = dict(zip(VIDEO_PREDICTOR.emotions,
                                   [(0, 0, 255), (0, 173, 255), (255, 0, 77),
                                    (255, 0, 213), (0, 255, 26)]))
    self.pub_op = VIDEO_PREDICTOR.emotions[-1]
    self.is_camera_working = True
    self.is_exit = False
    self.opinions = dict(zip(VIDEO_PREDICTOR.emotions, np.zeros(5, dtype=np.int32)))
    self.items = dict()
    self.recommend_pool = None
    # one (initially empty) item list per emotion
    self.classified_items = dict(zip(VIDEO_PREDICTOR.emotions,
                                     [[] for _ in VIDEO_PREDICTOR.emotions]))
    self.sentiments = dict(zip(VIDEO_PREDICTOR.emotions, VIDEO_PREDICTOR.sentiment_scores))
    self.frame = None
    self.interests = dict()
def SR_it(input_dir, output_dir, scaling_factor):
    base_dir = os.getcwd()
    file_names = []
    projs = []
    geos = []
    SF = scaling_factor
    if input_dir.endswith("/"):
        O = input_dir.split("/")[-2]
    else:
        O = input_dir.split("/")[-1]
    with tf.Session() as session:
        network = predict.load_model(session)
        driver = gdal.GetDriverByName("GTiff")
        os.chdir(input_dir)
        images = glob.glob('*.tif')
        for image in tqdm(images):
            image = gdal.Open(image)
            geo = image.GetGeoTransform()
            # shrink the pixel size by the scaling factor
            pixW = float(geo[1]) / SF
            pixH = float(geo[5]) / SF
            geo = [geo[0], pixW, geo[2], geo[3], geo[4], pixH]
            # print(geo)
            proj = image.GetProjection()
            projs.append(proj)
            geos.append(geo)
        os.chdir(base_dir)
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        for file_name in tqdm(os.listdir(input_dir)):
            file_names.append(file_name)
        for set_name in [O]:
            for scaling_factor in [SF]:
                dataset = data.SR_Run(set_name, scaling_factors=[scaling_factor])
                for I, proj, geo, file_name in tqdm(
                        zip(dataset.images, projs, geos, file_names)):
                    Im = [I]
                    prediction = predict.predict(Im, session, network,
                                                 targets=None, border=scaling_factor)
                    prediction = prediction[0]
                    # move the channel axis to the front: (H, W, C) -> (C, H, W)
                    prediction = np.swapaxes(prediction, -1, 0)
                    prediction = np.swapaxes(prediction, -1, 1)
                    out = output_dir + str(file_name)
                    DataSet = driver.Create(out, prediction.shape[2], prediction.shape[1],
                                            prediction.shape[0], gdal.GDT_Byte)
                    for i, image in enumerate(prediction, 1):
                        DataSet.GetRasterBand(i).WriteArray(image)
                    DataSet.SetProjection(proj)
                    DataSet.SetGeoTransform(geo)
                    # DataSet.SetNoDataValue(0)
                    del DataSet
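# Example call (sketch; directory names are assumptions). Note that output_dir
# should end with a path separator, since filenames are appended to it by plain
# string concatenation above.
#
# SR_it('input_tiles/', 'sr_output/', 4)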
def setup_multistage(data_dir, subset, model_paths, out_dir):
    print("Loading dataset")
    # use the caller-supplied data_dir/subset (previously hard-coded to
    # "data/4_class_11" and "val", which silently ignored these arguments)
    quartary_dataset, quartary_dataloader = get_subset_dataset_and_loader(
        data_dir, subset, batch_size=1)
    print("Loading binary model")
    binary_model_path = model_paths[0]
    binary_meta_dict = get_meta_dict_from_model_path(binary_model_path)
    binary_model = load_model(binary_model_path, binary_meta_dict, 2, device)
    print("Loading trinary model")
    trinary_model_path = model_paths[1]
    trinary_meta_dict = get_meta_dict_from_model_path(trinary_model_path)
    trinary_model = load_model(trinary_model_path, trinary_meta_dict, 3, device)
    return quartary_dataset, quartary_dataloader, binary_model, trinary_model
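# Example call (sketch; all paths are assumptions):
#
# dataset, loader, binary_model, trinary_model = setup_multistage(
#     'data/4_class_11', 'val',
#     ['models/binary.pt', 'models/trinary.pt'],
#     'out/')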
def classifyBtnClicked(self):
    image = predict.load_image(self.imgName)
    model = predict.load_model()
    prediction = model.predict(image)
    print(prediction)
    print(np.max(prediction))
    print(predict.int_to_word_out[np.argmax(prediction)])
    self.result_lbl.setText(predict.int_to_word_out[np.argmax(prediction)])
def __init__(self, torch_model_file, word_idx_file, target_idx_file):
    self.info = {
        'model_file': torch_model_file,
        'word_to_idx_file': word_idx_file,
        'target_to_idx_file': target_idx_file
    }
    print("Loading trained model: %s" % torch_model_file)
    print("Loading word idx: %s" % word_idx_file)
    print("Loading target idx: %s" % target_idx_file)
    self.model, self.word_to_idx, __, self.target_word_list = load_model(
        torch_model_file, word_idx_file, target_idx_file)
def get_accuracy(path=TEST_PATH, model=load_model()):
    # NOTE: the default model is loaded once, at function-definition time
    labels = os.listdir(path)
    percent_dict = {}
    for label in tqdm(labels):
        label_path = path + '/' + label
        sounds = os.listdir(label_path)
        all_counts, correct_counts = len(sounds), 0
        for sound in tqdm(sounds):
            if predict(label_path + '/' + sound, model) == label:
                correct_counts += 1
        percent_dict[label] = round((100 * correct_counts / all_counts), 2), all_counts
    return percent_dict
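# Example invocation (sketch): report per-label accuracy on the default test set.
# Each dict value is a (percent, sample_count) tuple, as built above.
#
# for label, (accuracy, count) in get_accuracy().items():
#     print('{}: {}% over {} samples'.format(label, accuracy, count))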
def main(): print("Reading in command-line args...") args = parse_cmd_args() config = load_config(args.config) model = load_model(args.model, args.type, device="cuda:{}".format(args.gpu)) print("Evaluate on test set...") results = predict_on_input(model, args.type, args.path_in, config, args.charchecker, "cuda:{}".format(args.gpu)) print("Writing to file {}.".format(args.outfile)) write_to_file(results, args.outfile) print("Done.")
def test_crf(model_path, test_path, label_path):
    """
    Label data using a pre-trained CRF model.

    :param model_path: path to the trained model
    :param test_path: path to the data to label
    :param label_path: path for the labelled output
    :return:
    """
    opt = create_default_opt()
    crf_model = pywapiti.mdl_new(pywapiti.rdr_new(opt.maxent))
    crf_model.opt = opt
    crf_model = load_model(crf_model, model_path)
    do_label(crf_model, test_path, label_path)
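# Example call (sketch; the file paths are assumptions):
#
# test_crf('models/crf.model', 'data/test.txt', 'data/test_labeled.txt')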
def get_churn_prediction():
    if request.method == 'POST':
        form = request.form
        id = form.get('id')
        customer = form.get('customer')
        surname = form.get('surname')
        score = form.get('score')
        geography = form.get('geography')
        gender = form.get('gender')
        age = form.get('age')
        tenure = form.get('tenure')
        balance = form.get('balance')
        products = form.get('products')
        crcard = form.get('crcard')
        activemember = form.get('activemember')
        salary = form.get('salary')
        data = {
            'RowNumber': [id],
            'CustomerId': [customer],
            'Surname': [surname],
            'CreditScore': [score],
            'Geography': [geography],
            'Gender': [gender],
            'Age': [age],
            'Tenure': [tenure],
            'Balance': [balance],
            'NumOfProducts': [products],
            'HasCrCard': [crcard],
            'IsActiveMember': [activemember],
            'EstimatedSalary': [salary]
        }
        model = load_model(model_path='models/gboosting.pkl')
        prediction = predict(model=model, data=data)
        churn = 'let him go' if prediction.get("prediction") == 1 else 'remain a user'
        # return f'<h1>The user {surname} is likely to {churn}'
        return render_template("prediction.html", surname=surname, churn=churn)
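# Note: the view above deserializes the pickle on every POST. A sketch of
# loading it once at import time instead (same assumed load_model helper):
#
# MODEL = load_model(model_path='models/gboosting.pkl')
# # ...and inside the view: prediction = predict(model=MODEL, data=data)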
from predict import load_model, word_tokenize

logging.root.setLevel(logging.NOTSET)
FORMAT = '%(asctime)-11s %(message)s'
logging.basicConfig(format=FORMAT)
logger = logging.getLogger('playground')

file = join(DATASETS_FOLDER, "LTA", "VNESEScorpus.txt")
NUM_SENTS = 200
LOG_EVERY_NUM_SENTS = 50

# Predict
base_path = Path(CACHE_ROOT) / "models/wtk_crf_4"
tagger = load_model(base_path)
f = open(file, "r")
start = time.time()
for i, line in enumerate(f):
    output = word_tokenize(tagger, line)
    if i % LOG_EVERY_NUM_SENTS == 0:
        logger.info(i)
    if i == NUM_SENTS:
        break
end = time.time()
logger.info(f"Underthesea {end-start}")
f.close()

# Pyvi
f = open(file, "r")
start = time.time()
    return 'Get request to CRNN server.'


@app.route('/process', methods=['POST'])
def process():
    image_buf = request.files['image']
    image = Image.open(image_buf).convert('RGB')
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    lines = json.loads(request.form['lines'])
    images = []
    for i, line in enumerate(lines, 0):
        x1, y1, x2, y2, x3, y3, x4, y4 = line
        # crop each quadrilateral text region to its axis-aligned bounding box
        images.append(image[min(y1, y2):max(y3, y4), min(x1, x4):max(x2, x3)])
    global graph
    with graph.as_default():
        outs = process_image(model.model, images)
    return jsonify(outs)


if __name__ == '__main__':
    # because CUDA is being used somewhere else
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
    logging.getLogger().setLevel(logging.ERROR)
    model = load_model(model_path='models/model_new_best.h5')
    graph = tf.get_default_graph()
    app.run(host='127.0.0.1', port=5002)
import sys
import timeit
import random as r

from flask import Flask

sys.path.insert(0, 'Word2VecProto/scripts')
import word2vec
import predict

print(word2vec.train_prototype_model())

# ---------------------- Parameters section -------------------
#
startT = timeit.default_timer()
data_source = "pickle"  # keras_data_set|local_dir
print('Load Vocabulary')
vocabulary = predict.load_dict(data_source)
print('Load Model')
loaded_model = predict.load_model()
print('Load W2V')
predict.load_w2v()
stopT = timeit.default_timer()
print('Load duration: {}'.format(stopT - startT))

startT = timeit.default_timer()
print('Load Tiny Prototype')
tiny_mode = word2vec.train_prototype_model()
stopT = timeit.default_timer()
print('Load duration for prototype: {}'.format(stopT - startT))
# # ---------------------- Parameters end -----------------------

# finally:
app = Flask(__name__, static_url_path="/models", static_folder='./models')
logger = logging.getLogger(__name__)

# NOTE this import needs to happen after the logger is configured

# Initialize the Flask application
application = Flask(__name__)
application.config['ALLOWED_EXTENSIONS'] = set(['pdf'])
application.config['CONTENT_TYPES'] = {"pdf": "application/pdf"}
application.config["Access-Control-Allow-Origin"] = "*"
CORS(application)
swagger = Swagger(application)

model, char2idx, idx2char = load_model()


def clienterror(error):
    resp = jsonify(error)
    resp.status_code = 400
    return resp


def notfound(error):
    resp = jsonify(error)
    resp.status_code = 404
    return resp


@application.route('/Stephen-King-Bot', methods=['POST'])
        self.text_region_note = tk.Label(
            self,
            text="--- Type or Paste Text Here, and Press Enter ---",
            bg="lightgrey",
            fg="black",
            pady=10)
        self.text_region_note.pack(side=tk.BOTTOM, fill=tk.X)
        self.colour_schemes = [{
            "bg": "lightgrey",
            "fg": "black"
        }, {
            "bg": "grey",
            "fg": "white"
        }]

    def classify_language(self, event=None):
        text_input = self.text_region.get(1.0, tk.END).strip()
        self.language_identified.set(lc[classify_text(
            text=text_input, model=model, le=le, n_gram_list=n_gram_list)])
        self.text_region.delete(1.0, tk.END)


if __name__ == "__main__":
    model, le, lc, n_gram_list = load_model()
    todo = Todo()
    todo.mainloop()
def plot_roc(y_test, probs, plotname=path_to_project_presentation + 'roc_plt.png'):
    '''
    input: y_test (np array), probs (np array), plotname (string)
    output: None (saves plot to disk)
    '''
    plt.clf()
    # plot a 45-degree line for reference
    z = np.linspace(0, 1)
    plt.plot(z, z, ls='dotted')
    for i, lbl in enumerate(classes):
        plot_one_curve(y_test, probs[:, i], lbl)
    plt.xlabel('False Positive Rate (FPR)')
    plt.ylabel('True Positive Rate (TPR)')
    plt.title('ROC Curve')
    plt.legend()
    plt.savefig(plotname)


if __name__ == '__main__':
    model = load_model('CNN_model_architecture.json', 'model_weights.h5')
    X_test = np.load(path_to_project_data + 'X_test_all.npy')
    y_test = np.load(path_to_project_data + 'y_test_all.npy')
    # predict on many images
    probs = predict_many(model, X_test)
    plot_roc(y_test, probs)
from crawler import get_review
from predict import sentiment_predict, load_model
from word_cloud import ngram

if __name__ == "__main__":
    model = load_model("architecture.json", "trained_model_weights.h5")
    while True:
        url = input("Enter Amazon Product Url- (quit for q) ")
        if url == 'q':
            break
        # fetch the average star rating and long-form reviews for the given URL
        avg_stars, long_reviews = get_review(url)
        # classify sentiment with the trained LSTM model
        res = sentiment_predict(model, long_reviews)
        print("result : ", res)
        print("avg_stars : ", avg_stars)
        # extract the words most frequently used in the reviews as unigrams
        unigram = ngram(1, long_reviews)
        words = unigram.get_freq_list()
        if res >= 0.6:
            unigram.gen_wordcloud(words, (36, 120, 255))  # blue background
        else:
            unigram.gen_wordcloud(words, (255, 18, 18))  # red background

# example of good sentiment
# https://www.amazon.com/AcuRite-Humidity-Thermometer-Hygrometer-Indicator/dp/B0013BKDO8/ref=br_asw_pdt-2?pf_rd_m=ATVPDKIKX0DER&pf_rd_s=&pf_rd_r=42W413EARKNAFRYM8VJG&pf_rd_t=36701&pf_rd_p=ebb28e10-c446-456a-ac5d-f251207d3750&pf_rd_i=desktop
# bad sentiment
# https://www.amazon.com/Home-Zone-Stainless-Rectangular-Removable/dp/B01H6CJ7HQ/ref=sr_1_56?keywords=garbage&qid=1557376063&refinements=p_72%3A2661621011&rnid=2661617011&s=gateway&sr=8-56
@app.route('/')
def index():
    return 'Get request to EAST server.'


@app.route('/process', methods=['POST'])
def process():
    image_buf = request.files['image']
    image = Image.open(image_buf).convert('RGB')
    image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
    global graph
    with graph.as_default():
        boxes = process_image(model, image)
    lines = []
    for box in boxes:
        line = box.reshape((8,)).tolist()
        lines.append(line)
    return jsonify(lines)


if __name__ == '__main__':
    logging.getLogger().setLevel(logging.ERROR)
    model = load_model(model_path='models/east/model-funsd150-icdar200.h5')
    graph = tf.get_default_graph()
    app.run(host='127.0.0.1', port=5001)
test_data = LogoData(targetlist_path=args.targetlist,
                     targetlist_labeldict=args.target_label,
                     transform=transform_test,
                     train=False)
testloader = DataLoader(test_data, batch_size=256, shuffle=False, num_workers=8)

# initialize model
classes = 180  # the model is trained with only 180 brands
modelpath = args.model_path
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = load_model(classes, modelpath)
model.to(device)
model.eval()

# get all predictions
with torch.no_grad():  # freeze model
    pred_prob = torch.tensor([], device=device)
    targets = torch.tensor([], device=device)
    pred_feat = torch.tensor([], device=device)
    for i, (inputs, labels) in enumerate(testloader):
        inputs, labels = inputs.to(device, dtype=torch.float), labels.to(device, dtype=torch.float)
        pred_feat = torch.cat((pred_feat, model.features(inputs)), 0)
        pred_prob = torch.cat((pred_prob, model(inputs)), 0)
        targets = torch.cat((targets, labels), 0)
def main(conf: DictConfig):
    print(conf.pretty())
    if 'gpu' in conf.general.keys():
        torch.cuda.set_device(conf.general.gpu)
    device = torch.device('cuda')
    torch.manual_seed(conf.general.seed)
    model = load_model(conf).to(device)

    face_det_conf = merge_detector_cfg(conf['face-detection'])
    detector = init_detector(face_det_conf, face_det_conf['weights'], device).to(device)
    crop_faces = partial(find_faces, model=detector, device=device, conf=face_det_conf)

    data_conf = conf.data.test
    base_dir = data_conf.dir
    file_names = [file for file in os.listdir(base_dir) if file.endswith('mp4')]
    files = [os.path.join(base_dir, file) for file in file_names]
    if not len(files):
        raise RuntimeError("No files were found in {}".format(base_dir))
    else:
        logging.info("Total number of files: {}".format(len(files)))

    transforms = T.Compose([instantiate(val['transform']) for val in data_conf.transforms])
    logging.debug("Using transforms: {}".format(transforms))

    def _predict(images: List[Tensor]) -> float:
        last_idx = len(images) - 1
        num_samples = min(data_conf.sample.max_samples, len(images))
        idxs = np.linspace(0, last_idx, num_samples, dtype=int, endpoint=True)
        images = list(map(images.__getitem__, idxs))
        x = torch.stack(list(map(transforms, images)))
        pad_amount = reader_conf.frames - x.size(0)
        if pad_amount > 0:
            x = pad_torch(x, pad_amount, 'start')
        # D, C, H, W -> C, D, H, W
        x = x.transpose(0, 1).unsqueeze_(0)
        with torch.no_grad():
            out = model(x, None)
            y_pred = model.to_y(*out).cpu().item()
        return y_pred

    def _save():
        save_dir = conf.get('general.save_dir', os.getcwd())
        save_path = os.path.join(save_dir, conf.general.csv_name)
        logging.info("Saving predictions to {}".format(save_path))
        for _ in range(5):
            try:
                df.to_csv(save_path, index=False)
                break
            except Exception as e:
                logging.error(traceback.format_exc())
                continue

    reader_conf = data_conf.sample
    df = pd.DataFrame(file_names, columns=['filename'])
    df['label'] = 0.5
    del file_names

    for idx, path in enumerate(files):
        if not (idx + 1) % 100:
            _save()
            gc.collect()
        try:
            frames, meta = read_frames_cv2(path, reader_conf.frames, with_meta=True)
            if frames is not None and len(frames) > 0:
                frames = torch.from_numpy(frames).to(device)
                px_ratio = meta['width'] * meta['height'] / (1920 * 1080)
                bs = math.ceil(face_det_conf['batch_size'] / max(px_ratio, 1.0))
                faces = crop_faces(frames, bs)
                del frames
                if len(faces) > 0:
                    y_hat = _predict(faces)
                    df.loc[idx, 'label'] = y_hat
                    logging.info("cv2 | {} | faces: {} | y: {:.03f} | {}".format(
                        idx, len(faces), y_hat, path))
                else:
                    logging.warning("No faces were found in ({}): {}".format(idx, path))
                del faces
        except Exception as e:
            logging.error(traceback.format_exc())
        torch.cuda.empty_cache()
        gc.collect()

    _save()
    logging.info("DONE")
import dlib
import cv2
import os
import argparse
import time

# NOTE: the GUI names below (Tk, Canvas, askopenfilename, Image, ImageTk) are
# imported elsewhere in the original file; these imports are assumed:
from tkinter import Tk, Canvas
from tkinter.filedialog import askopenfilename
from PIL import Image, ImageTk

from parameters import NETWORK, DATASET, VIDEO_PREDICTOR
from model import build_model
from predict import load_model, predict

top = Tk()
top.title('Facial Expression Recognition')
top.geometry('500x270')
canvas = Canvas(top, width=200, height=200, bd=0, bg='white')
canvas.grid(row=1, column=0)
model = load_model()
image = None
shape_predictor = None


def showImg():
    # `e` is a tkinter variable (e.g. a StringVar) defined elsewhere in the file
    File = askopenfilename(title='Open Image')
    e.set(File)
    print(e.get())
    image = Image.open(e.get())
    w, h = image.size
    image = image.resize((200, 200))
    imgfile = ImageTk.PhotoImage(image)
    canvas.image = imgfile  # <--- keep reference of your image
import predict

predict.load_model()
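# Here load_model() is called only for its side effects. A sketch that keeps
# the returned model and runs inference (assuming the predict module also
# exposes a predict() helper, as in other snippets in this collection):
#
# model = predict.load_model()
# result = predict.predict(some_input, model)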
import data
import predict

import numpy as np
import tensorflow as tf

with tf.Session() as session:
    network = predict.load_model(session)
    for set_name in ['Set5', 'Set14', 'B100', 'Urban100']:
        for scaling_factor in [2, 3, 4]:
            dataset = data.TestSet(set_name, scaling_factors=[scaling_factor])
            predictions, psnr = predict.predict(dataset.images, session, network,
                                                targets=dataset.targets,
                                                border=scaling_factor)
            print('Dataset "%s", scaling factor = %d. Mean PSNR = %.2f.'
                  % (set_name, scaling_factor, np.mean(psnr)))
    test_graphs.append(g_device)
else:
    test_graphs = org_test_graphs

test_set = IEDataset(config.test_file, test_graphs, test_align, test_exist,
                     gpu=use_gpu,
                     relation_mask_self=config.relation_mask_self,
                     relation_directional=config.relation_directional,
                     symmetric_relations=config.symmetric_relations)
model, tokenizer, config, vocabs = load_model(model_path, model_name,
                                              device=args.gpu, gpu=True,
                                              beam_size=5)
test_set.numberize(tokenizer, vocabs)
valid_patterns = load_valid_patterns(config.valid_pattern_path, vocabs)
test_batch_num = len(test_set) // config.eval_batch_size + \
    (len(test_set) % config.eval_batch_size != 0)
print("message passing level: ", config.lamda)
tasks = ['trigger', 'relation', 'role']
best_dev = {k: 0 for k in tasks}
progress = tqdm.tqdm(total=test_batch_num, ncols=75, desc='Test')
    error_graph(mse[:, 1], storage_directory, cluster_indices, E, predict_energies)
    sys.exit()

# try to load the dataset
dataset = None
dataset = load_dataset(dataset_path)
if not dataset:
    print("Could not load dataset file. Please ensure it is of the right format (.npz or .xyz)")
    sys.exit(2)

# try to load model
import predict
predict.load_model(model_path)

# prepare data
try:
    R = r_to_desc(dataset["R"])
    E = np.array(dataset["E"])
    F = np.array(dataset["F"])
except KeyError as err:  # a missing key raises KeyError, not getopt.GetoptError
    print("Unable to find necessary data in data set. Data set must contain "
          "['R'], ['F'] and ['E'] keys corresponding to spatial data, forces and energies.")
    print(err)
    sys.exit(2)

# create storage directory and save path to it for further functions
storage_dir = create_storage_directory(model_path, dataset_path,
import argparse

import cv2
import scipy.misc
from process_image import read_image, get_slice_masks, get_best_sliced_image
from predict import predict, load_model

n_rows = 33

if __name__ == '__main__':
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="Path to the image")
    ap.add_argument("-m", "--model", required=True,
                    help="Path to the vgg16 weights .h5 file")
    ap.add_argument('-o', '--output', default='out.jpg', help='Path for image output')
    args = vars(ap.parse_args())

    print('Loading image, producing slices')
    image = read_image(args['image'])
    masks = get_slice_masks(image)

    print('Loading model')
    model = load_model(args['model'])
    image_classification = predict(model, image, order=True)[0]
    print('Classification with highest probability: %s (p=%.2f)'
          % (image_classification[0], image_classification[1]))

    print('Identifying best image')
    best_image = get_best_sliced_image(image, model, masks, n_rows=n_rows)
    scipy.misc.imsave(args['output'], best_image)