def workflow(day_dir, args):
    """Run the full per-day pipeline: preprocess -> predict -> cleanup.

    Args:
        day_dir: Directory holding a single day's video data.
        args: Parsed CLI namespace; boolean flags `preprocess`, `predict`
            and `remove_intermediate` select which stages run.
    """
    started_at = time.time()

    if args.preprocess:
        print('Start preprocess video')
        preprocess(day_dir, args.expert, args.subject_name, args.output_dir,
                   mul_num=args.mul_num)
        print('Preprocess video completed.')

    if args.predict:
        date = get_path_leaf(day_dir)
        test_name = '{}_{}_{}.txt'.format(args.expert, args.subject_name, date)
        file_list_path = make_list(test_name, args.output_dir, date)
        model = get_pretrained_model(args)
        print("Post process: {}, svm: {}".format(args.post_process, args.svm))
        predict(args, model, file_list_path, args.output_dir, date,
                args.result_name, post_process=args.post_process, svm=args.svm)

    if args.remove_intermediate:
        shutil.rmtree(os.path.join(args.output_dir, 'intermediate'))

    print('Directory {} complete processing!'.format(day_dir))
    print("Cost {} minutes".format((time.time() - started_at) / 60))
def init(self, root_dir, pair, time_series, prices, labels, price_range, is_use_residual, is_train=True):
    """Return cached walk-forward (predictions, prices), computing and caching them if absent.

    Predictions are produced on a sliding window: train on [start, end),
    predict, then advance both bounds by 12 bars until `end` reaches the
    label count. Results are pickled next to `root_dir` so subsequent calls
    just reload them.

    Args:
        root_dir: Directory prefix for the pickle cache files.
        pair: Currency-pair identifier; also part of the cache file name.
        time_series, prices, labels, price_range: Model inputs forwarded to
            `create_training_set` / `predict`.
        is_use_residual: Selects the residual model variant (and a distinct cache).
        is_train: When False and no cache exists, returns (None, None) instead
            of training.

    Returns:
        Tuple (predictions, current_prices), or (None, None) when not cached
        and `is_train` is False.
    """
    file_ext = pair + "_use_resid" if is_use_residual else pair
    pred_path = root_dir + "volatility41_model_test_predictions_" + file_ext
    price_path = root_dir + "volatility41_model_test_prices_" + file_ext

    if not os.path.isfile(pred_path):
        if not is_train:
            return None, None
        x = self.create_training_set(pair, time_series, prices, labels, price_range)
        start = 0
        end = 700
        predictions = []
        current_prices = []
        while end < len(labels):
            predictions.append(predict(start, end, x, labels, is_use_residual))
            current_prices.append(prices[end - 1])
            print(start)  # progress indicator
            start += 12
            end += 12
        # Context managers close the handles (original leaked open() results).
        with open(pred_path, 'wb') as f:
            pickle.dump(predictions, f)
        with open(price_path, 'wb') as f:
            pickle.dump(current_prices, f)

    with open(pred_path, 'rb') as f:
        predictions = pickle.load(f)
    with open(price_path, 'rb') as f:
        current_prices = pickle.load(f)
    return predictions, current_prices
def make_prediction(self, pair, time_series, prices, labels, price_range, history_prices, is_use_residual):
    """Train on the most recent 700 labels and return a single prediction.

    `history_prices` is accepted for interface compatibility but unused here.
    """
    window = 700
    train_end = len(labels)
    train_start = train_end - window
    features = self.create_training_set(pair, time_series, prices, labels, price_range)
    return predict(train_start, train_end, features, labels, is_use_residual)
def put_text(self):
    """Run digit recognition on the test image and draw each result on the canvas.

    `predict` yields (x, y, label) triples; each label is rendered in red at
    its detected position.
    """
    for x, y, label in predict("image/test.png"):
        self.c.create_text(x, y, text=label, fill="red",
                           font="Times 40 italic bold", anchor="center")
def init(self, root_dir, pair, time_series, prices, labels, price_range, is_use_residual, is_train=True):
    """Return cached walk-forward (predictions, prices) for the regime model, computing them if absent.

    Same sliding-window scheme as the other models: train on [start, end),
    predict, advance both bounds by 12 bars. Results are pickled under
    `root_dir` so later calls only reload.

    Args:
        root_dir: Directory prefix for the pickle cache files.
        pair: Currency-pair identifier; also part of the cache file name.
        time_series, prices, labels, price_range: Model inputs forwarded to
            `create_training_set` / `predict`.
        is_use_residual: Selects the residual model variant (and a distinct cache).
        is_train: When False and no cache exists, returns (None, None) instead
            of training.

    Returns:
        Tuple (predictions, current_prices), or (None, None) when not cached
        and `is_train` is False.
    """
    file_ext = pair + "_use_resid" if is_use_residual else pair
    pred_path = root_dir + "regime59_model_test_predictions_" + file_ext
    price_path = root_dir + "regime59_model_test_prices_" + file_ext

    if not os.path.isfile(pred_path):
        if not is_train:
            return None, None
        # NOTE: `prices` is passed twice — the regime training set takes it as
        # an extra trailing argument; verify against create_training_set.
        x = self.create_training_set(pair, time_series, prices, labels,
                                     price_range, prices)
        start = 0
        end = 700
        predictions = []
        current_prices = []
        while end < len(labels):
            predictions.append(predict(start, end, x, labels, is_use_residual))
            current_prices.append(prices[end - 1])
            print(start)  # progress indicator
            start += 12
            end += 12
        # Context managers close the handles (original leaked open() results).
        with open(pred_path, 'wb') as f:
            pickle.dump(predictions, f)
        with open(price_path, 'wb') as f:
            pickle.dump(current_prices, f)

    with open(pred_path, 'rb') as f:
        predictions = pickle.load(f)
    with open(price_path, 'rb') as f:
        current_prices = pickle.load(f)
    return predictions, current_prices
def back_test_recent(self, pair, time_series, prices, labels, price_range, history_prices):
    """Walk-forward back-test over roughly the last 40 days of hourly bars.

    Trains on a 700-bar window ending 40*24 bars before the end of `labels`,
    then slides both bounds forward 12 bars at a time until the window
    reaches the present.

    `history_prices` is accepted for interface compatibility but unused here.

    Returns:
        Tuple (predictions, current_prices) collected at each step.
    """
    x = self.create_training_set(pair, time_series, prices, labels, price_range)
    end = len(labels) - (40 * 24)  # begin 40 days (24 hourly bars each) back
    start = end - 700
    predictions = []
    current_prices = []
    while end < len(labels):
        predictions.append(predict(start, end, x, labels))
        current_prices.append(prices[end - 1])
        print(start)  # progress indicator (was a Python 2 print statement)
        start += 12
        end += 12
    return predictions, current_prices
def main():
    """Read pairs from the spreadsheet, extract relations, and write them to CSV.

    Rows whose relation field (index 2) is empty are skipped. Output columns:
    number, person1, person2, relation, content, time_created.
    """
    path = 'J:/weibodata/a.csv'
    result = openExcel(path)
    print(result)
    name_list = name(result)
    print(name_list)
    relations = predict(name_list)
    # Context manager guarantees the CSV is flushed and closed
    # (the original opened the file and never closed it).
    with open('relation.csv', 'w', encoding='utf-8', newline='') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(["number", "person1", "person2",
                             "relation", "content", "time_created"])
        for number, relation in enumerate(relations):
            if relation[2] != '':
                csv_writer.writerow([number] + relation + [result[number][1]])
def init(self, pair, time_series, prices, labels, price_range, lag):
    """Return cached walk-forward (predictions, prices) for the barrier model, computing them if absent.

    On a cache miss this fits a 4-cluster KMeans on `prices` (stored on
    `self.kmeans`), builds the training set, then slides a 700-bar window
    forward 12 bars at a time, predicting at each step. Results are pickled
    under /tmp so later calls only reload.

    Args:
        pair: Currency-pair identifier; also part of the cache file name.
        time_series, prices, labels, price_range, lag: Model inputs forwarded
            to `create_training_set` / `predict`.

    Returns:
        Tuple (predictions, current_prices).
    """
    pred_path = "/tmp/barrier1_model_test_predictions_" + pair
    price_path = "/tmp/barrier1_model_test_prices_" + pair

    if not os.path.isfile(pred_path):
        cluster_num = 4
        self.kmeans = KMeans(n_clusters=cluster_num, init='k-means++',
                             max_iter=100, n_init=1, random_state=42).fit(prices)
        x = self.create_training_set(pair, time_series, prices, labels,
                                     price_range, lag)
        start = 0
        end = 700
        predictions = []
        current_prices = []
        while end < len(labels):
            predictions.append(predict(start, end, x, labels))
            current_prices.append(prices[end - 1])
            print(end)  # progress indicator
            start += 12
            end += 12
        # Context managers close the handles (original leaked open() results).
        with open(pred_path, 'wb') as f:
            pickle.dump(predictions, f)
        with open(price_path, 'wb') as f:
            pickle.dump(current_prices, f)

    with open(pred_path, 'rb') as f:
        predictions = pickle.load(f)
    with open(price_path, 'rb') as f:
        current_prices = pickle.load(f)
    return predictions, current_prices
help='Print debug info') subparsers = parser.add_subparsers(dest='command') training = subparsers.add_parser( 'train', help="Trains the model, '-nc' skip cleaning & '-ni' skip IDF") training.add_argument('-nc', action='store_true', help="Skips the Data Cleaning Process") training.add_argument('-d', type=str, help="Enter the name of the dataset") training.add_argument('-t', type=int, help="Enter the rows to be truncated") prediction = subparsers.add_parser( 'predict', help="Predicts the output, 'main.py -P number_of_tags url'") prediction.add_argument('n', type=int) prediction.add_argument('url', type=str) args = parser.parse_args() if args.command == 'train': if args.t and args.d is not None: train(args.nc, args.d, args.t) elif args.t is None and args.d is None: train(args.nc, 'articles.csv', -1) elif args.t is None: train(args.nc, args.d, -1) elif args.d is None: train(args.nc, 'articles.csv', args.t) elif args.command == 'predict': predict(args.n, args.url)
def test_predict(self):
    """predict() on the SRKW test set should find 3 calls shaped (3, 1)."""
    call_count, output_shape = predict('srkw_cnn.h5',
                                       '../datasets/test_srkw/calls')
    self.assertEqual(call_count, 3)
    self.assertEqual(output_shape, (3, 1))
def main():
    """Build the name list, echo it, and run prediction over it."""
    names = name()
    print(names)
    predict(names)
def _dir_command(cmd, slots, question):
    """Build the mkdir/rmdir response: emit the command (cd-ing first when a
    `dirloc` slot is present) or ask `question` for the missing `dirname`."""
    if "dirname" in slots:
        if "dirloc" in slots:
            return jsonify({
                "cmd": ["cd " + slots["dirloc"], cmd + " " + slots["dirname"]]
            })
        return jsonify({"cmd": [cmd + " " + slots["dirname"]]})
    return jsonify({"res": question, "slot": "dirname"})


def index():
    """Flask endpoint mapping a natural-language request to shell command(s).

    Expects JSON with an "eng" utterance and optionally pre-filled "slots".
    Returns either {"cmd": [...]} — commands to execute — or
    {"res": ..., "slot": ...} asking the user to supply a missing slot,
    or the plain string "no intent detected".
    """
    # Parse the request body once instead of calling get_json repeatedly.
    payload = request.get_json(force=True)
    oeng = str(payload["eng"])
    eng = clean(oeng)
    prediction = predict(eng)
    print(prediction)
    intent = prediction["intent"]
    slots = proc_slots(prediction["slots"], eng)
    # Caller-supplied slots override the predicted ones.
    if "slots" in payload:
        for k in payload["slots"]:
            slots[k] = payload["slots"][k]

    if intent == "mkdir":
        return _dir_command("mkdir", slots,
                            "What do you want the directory to be named?")
    if intent == "rmdir":
        return _dir_command("rmdir", slots,
                            "What is the name of the folder?")
    if intent == "cd":
        if "dirname" not in slots:
            return jsonify({
                "res": "Which directory do you want to move to?",
                "slot": "dirname"
            })
        return jsonify({"cmd": ["cd " + slots["dirname"]]})
    if intent == "ls":
        if "dirname" not in slots:
            return jsonify({"cmd": ["ls"]})
        return jsonify({"cmd": ["cd " + slots["dirname"], "ls"]})
    if intent == "touch":
        if "filname" not in slots:
            return jsonify({
                "res": "What should be the name of this new file?",
                "slot": "filname"
            })
        if "dirloc" not in slots:
            return jsonify({"cmd": ["touch " + slots["filname"]]})
        return jsonify({
            "cmd": ["cd " + slots["dirloc"], "touch " + slots["filname"]]
        })
    if intent == "man":
        if "comname" not in slots:
            return jsonify({
                "res": "Which command do you want to know about?",
                "slot": "comname"
            })
        return jsonify({"cmd": ["man " + slots["comname"]]})
    if intent == "cdback":
        return jsonify({"cmd": ["cd .."]})
    return "no intent detected"
def test_predict(self):
    """model_predict.predict() on the validation set should detect 44 calls shaped (44, 1)."""
    detected, shape = model_predict.predict(
        "preprocess_mag_scipy_Srkws.h5",
        "content/datasets/val_srkw/calls")
    self.assertEqual(detected, 44)
    self.assertEqual(shape, (44, 1))