def main():
    logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)
    logging.info('Starting...')

    input_args = get_predict_input_args()
    image_path = input_args.image_path
    checkpoint_path = input_args.checkpoint_folder + '/' + input_args.checkpoint_file
    top_k = input_args.top_k
    show_p = input_args.show_probs
    number_of_classes = input_args.number_of_classes
    category_names_path = input_args.category_names

    with open(category_names_path, 'r') as f:
        category_names = json.load(f)

    # torch.cuda.is_available is a function; the bare attribute is always
    # truthy, so it must be called here.
    device = 'cpu'
    if input_args.gpu and torch.cuda.is_available():
        device = 'cuda'

    model, class_from_index, arch = load_checkpoint(checkpoint_path, device, number_of_classes)
    model = freeze_layers(model, arch)
    predict(image_path, model, device, category_names, class_from_index, top_k, show_probs=show_p)

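# A minimal sketch of what get_predict_input_args() might look like for the
# main() above, inferred from the attributes it reads; flag names and
# defaults here are assumptions, not the project's actual CLI.
import argparse

def get_predict_input_args():
    parser = argparse.ArgumentParser(description='Predict image class from a checkpoint')
    parser.add_argument('image_path', type=str)
    parser.add_argument('--checkpoint_folder', type=str, default='checkpoints')
    parser.add_argument('--checkpoint_file', type=str, default='checkpoint.pth')
    parser.add_argument('--top_k', type=int, default=5)
    parser.add_argument('--show_probs', action='store_true')
    parser.add_argument('--number_of_classes', type=int, default=102)
    parser.add_argument('--category_names', type=str, default='cat_to_name.json')
    parser.add_argument('--gpu', action='store_true')
    return parser.parse_args()
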
def run(arg, f1, f2):
    if arg == '--train':
        agent = objects.Agent(14, 2)
        fight_data = construct.fights('../xml_data/schedule.xml')
        write_file(fight_data, TRAIN_FILE_NAME)
        network.train(agent)
    elif arg == '--predict':
        agent = objects.Agent(14, 2)
        fight_data = construct.fights('../xml_data/schedule.xml')
        write_file(fight_data, FIGHT_FILE_NAME)
        # Strip spaces so fighter names match the keys used in the data files.
        network.predict(agent, f1.replace(" ", ""), f2.replace(" ", ""))
    print("Arguments Processed!")

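# A hypothetical entry point for run() above, assuming the mode flag and the
# two fighter names arrive as positional command-line arguments.
import sys

if __name__ == '__main__':
    mode = sys.argv[1]
    f1 = sys.argv[2] if len(sys.argv) > 2 else ''
    f2 = sys.argv[3] if len(sys.argv) > 3 else ''
    run(mode, f1, f2)
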
def __open(self):
    # Dialog options: default to .jpg, start browsing at C:\.
    self.file_opt = options = {}
    options['defaultextension'] = '.jpg'
    options['filetypes'] = [('image files', '.jpg'), ('all files', '.*')]
    options['initialdir'] = 'C:\\'

    self.file_path = tkFileDialog.askopenfilename(**self.file_opt)
    matrix = network.predict(self.file_path)
    self.game.start_puzzle = matrix
    self.game.start()
    self.__clear_answers()

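# Hypothetical wiring for the __open handler above, assuming the class holds
# a Tkinter (Python 2) root window; the menu names are illustrative only.
def __create_menu(self):
    import Tkinter as tk
    menubar = tk.Menu(self.root)
    filemenu = tk.Menu(menubar, tearoff=0)
    filemenu.add_command(label='Open image...', command=self.__open)
    menubar.add_cascade(label='File', menu=filemenu)
    self.root.config(menu=menubar)
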
def post(self):
    try:
        id = None
        global counter
        # Atomically hand out a unique id per request.
        with counter.get_lock():
            id = counter.value
            counter.value += 1

        parser = reqparse.RequestParser()
        parser.add_argument("sequence", required=True)
        parser.add_argument("sens", required=True)
        parser.add_argument("num_matches", required=True)
        args = parser.parse_args()
        seq = args["sequence"]
        sens = float(args["sens"])
        num_matches = int(args["num_matches"])

        logging.info("Seq " + seq + " with id " + str(id) + " from ip " + request.remote_addr)

        parse(seq, id)
        process(id, seq)

        # Run the prediction in a child process. The target must be the
        # callable itself: Process(target=predict(id)) would call predict in
        # this process and pass its return value as the target.
        p = Process(target=predict, args=(id,))
        p.start()
        p.join()

        dot_bracket_string = to_string(id)
        print(dot_bracket_string)
        aligned_dot = align_sequence(seq, dot_bracket_string, sens, num_matches, 5)  # Maybe make threshold a parameter

        # Read the generated images, closing the files when done.
        with open("./pics/" + str(id) + "_pred.png", "rb") as file1:
            img1 = file1.read()
        with open("./pics/" + str(id) + "_binarized.png", "rb") as file2:
            img2 = file2.read()

        resp = make_response(json.dumps({
            "id": id,
            "seq": aligned_dot,
            "raw_dot": dot_bracket_string,
            "img1": b64encode(img1).decode('utf-8'),
            "img2": b64encode(img2).decode('utf-8'),
        }), 200)
        resp.headers.extend({
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Credentials': 'true',
            'Access-Control-Allow-Origin': '*'
        })
        return resp
    except Exception as e:
        print(e)
        resp = make_response(json.dumps({"id": id, "seq": "Error"}), 200)
        resp.headers.extend({
            'Access-Control-Allow-Headers': '*',
            'Access-Control-Allow-Credentials': 'true',
            'Access-Control-Allow-Origin': '*'
        })
        return resp

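# A hypothetical client for the POST endpoint above; the URL and path are
# assumptions, while the field names match the RequestParser arguments. The
# two returned images are base64-encoded, so they are decoded back to PNGs.
from base64 import b64decode
import requests

resp = requests.post('http://localhost:5000/predict',  # endpoint path assumed
                     data={'sequence': 'GGGAAACCC',
                           'sens': 0.8,
                           'num_matches': 3})
payload = resp.json()
print(payload['id'], payload['raw_dot'], payload['seq'])
with open('pred.png', 'wb') as out:
    out.write(b64decode(payload['img1']))
with open('binarized.png', 'wb') as out:
    out.write(b64decode(payload['img2']))
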
def predict(model, text):
    """Predict a response for one conversation."""
    model_text, topic_dict = preprocessing_for_one_conversation(
        text.strip(), topic_generalization=True)

    # Python 2: encode unicode input before handing it to the network.
    if isinstance(model_text, unicode):
        model_text = model_text.encode('utf-8')

    response = network.predict(model, model_text)

    # Undo topic generalization: substitute placeholders back, longest
    # replacement values first.
    topic_list = sorted(topic_dict.items(), key=lambda item: len(item[1]), reverse=True)
    for key, value in topic_list:
        response = response.replace(key, value)

    return response

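# Example use of the predict() wrapper above; load_model here is a
# hypothetical stand-in for however the project actually builds `model`.
if __name__ == '__main__':
    model = load_model()
    print(predict(model, u'my favourite topic is films'))
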
'''
'%d-%b-%Y')-datetime.timedelta(days=1),'%d-%b-%Y')
clean_csv(date_begin,date_end)
df_test=datas(date_begin,date_end)
'''
#===========================================================================
'''
data_split for analysis
predict_data for real life
'''
#---------------------------------------------------------------------------
#df_train,df_test=data_split(df_train)
#tempmax=23
#df_train,df_predict=predict_data(df_train,date_end,date_predict,tempmax_predict)
#===========================================================================
model = model_build(df_train, epochs=1000, batch_size=32)
model.load_weights("weights.best.hdf5")
model.compile(optimizer='adam', loss='mean_squared_error',
              metrics=[keras.losses.mean_absolute_percentage_error])
#===========================================================================
predictions = predict(model, df_test)
# Drop the leading window (one week of hourly rows + 1) consumed as model input.
df_test.drop(df_test.head(24 * 7 + 1).index, inplace=True)
df_test['Predicted'] = predictions.values
#===========================================================================
plot_values(df_test)
errors(df_test)
#===========================================================================
print(" RUNTIME \n --- %s seconds ---" % (time.time() - start_time))
#===========================================================================

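# For reference, "weights.best.hdf5" above is the kind of file a Keras
# ModelCheckpoint callback writes during training; the training call below is
# a sketch, not part of this script.
from keras.callbacks import ModelCheckpoint

checkpoint = ModelCheckpoint("weights.best.hdf5", monitor='val_loss',
                             save_best_only=True, verbose=1)
# model.fit(X_train, y_train, epochs=1000, batch_size=32,
#           validation_split=0.1, callbacks=[checkpoint])
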
tf.enable_eager_execution()

cap = cv2.VideoCapture(0)
net.initialize_flags(model_dir='data')
estimator = net.get_estimator()
cv2.startWindowThread()

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (256, 144))

    prediction = next(net.predict(estimator, gray.T / 255))

    # Each grid cell predicts (x, y, w, h, confidence); draw a box for every
    # cell above a 0.6 confidence threshold.
    for i in range(data.STRIDE_W):
        for j in range(data.STRIDE_H):
            p = prediction[i][j]
            if p[4] > 0.6:
                x = i * data.STRIDE + p[0] * data.STRIDE
                y = j * data.STRIDE + p[1] * data.STRIDE
                w = p[2] * data.IMAGE_WIDTH
                h = p[3] * data.IMAGE_HEIGHT
                # cv2.rectangle takes color before thickness; the original
                # passed 2 as the color, drawing near-black boxes.
                cv2.rectangle(gray,
                              (int(x - w / 2), int(y - h / 2)),
                              (int(x + w / 2), int(y + h / 2)),
                              255, 2)

    cv2.imshow('frame', gray)
    # waitKey pumps the GUI event loop; press 'q' to quit.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()

if not os.path.isfile(image_filename):
    print('Unable to find image file', image_filename)
    exit()

# BUGBUG: useful for testing, but probably don't want to assume the
# image is from the image dataset
image_category = image_filename.split(os.sep)[-2]

image_data = data.process_image(args.input)

# create the network
device = 'cuda' if args.gpu else 'cpu'
model = network.load_network(args.checkpoint, device)

# predict
probs, classes = network.predict(image_data, model, device, topk=args.top_k)

# Load the category to name mapping if provided
cat_to_name = None
if args.category_names and os.path.isfile(args.category_names):
    with open(args.category_names, 'r') as f:
        cat_to_name = json.load(f)

# output results
print('Image category:', image_category)
if cat_to_name:
    print('Image name:', cat_to_name[image_category])
print('Probabilities:', probs)
print('Classes:', classes)
if cat_to_name:
    names = [cat_to_name[cat] for cat in classes]
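    # The snippet above computes `names` but ends before using it; a plausible
    # completion (an assumption) prints each name with its probability.
    for name, prob in zip(names, probs):
        print('{}: {:.3f}'.format(name, prob))
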
'''
data_split for analysis
predict_data for real life
tempmax=23
df_train,df_predict=predict_data(df_train,date_end,date_predict,tempmax_predict)
'''
#---------------------------------------------------------------------------
#===========================================================================
model = model_build(df_train, epochs=5, batch_size=32, prediction_step=prediction_step)
model.load_weights("weights.best.hdf5")
model.compile(optimizer='adam', loss='mean_squared_error',
              metrics=[keras.losses.mean_absolute_percentage_error])
#===========================================================================
predictions = predict(model, df_test, prediction_step)
# 8112 corresponds to the removal of values used to make the prediction.
df_test.drop(
    df_test.head(val_preappended_data + prediction_step).index,
    inplace=True
)
df_test['Predicted'] = predictions.values
#===========================================================================
plot_values(df_test)
errors(df_test)
#===========================================================================
print(" RUNTIME \n --- %s seconds ---" % (time.time() - start_time))
#===========================================================================

pa = ap.parse_args()
path_image = pa.input_img
number_of_outputs = pa.top_k
power = pa.gpu
input_img = pa.input_img
path = pa.checkpoint

training_loader, testing_loader, validation_loader, train_data = network.load_data()
model = network.load_checkpoint(path)

with open('cat_to_name.json', 'r') as json_file:
    cat_to_name = json.load(json_file)

# probabilities = network.predict('./flowers/test/1/image_06743.jpg', model, number_of_outputs, power)
probabilities = network.predict(path_image, model, number_of_outputs, power)

labels = [cat_to_name[str(index + 1)] for index in np.array(probabilities[1][0])]
probability = np.array(probabilities[0][0])

for i in range(number_of_outputs):
    print("{}. {} with a probability of {}".format(i + 1, labels[i], probability[i]))

print("----predict happy end------")
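# The parser `ap` used above is defined earlier in the script (not shown);
# inferred from the attributes read, it might look like this sketch, where
# the defaults are assumptions:
import argparse

ap = argparse.ArgumentParser(description='Predict flower class from an image')
ap.add_argument('input_img', type=str)
ap.add_argument('checkpoint', type=str)
ap.add_argument('--top_k', type=int, default=5)
ap.add_argument('--gpu', action='store_true')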