class WeatherForecast(object):
    url_base = "http://api.wunderground.com/api/"
    url_service = {"forecast": "/forecast/q/"}

    def __init__(self, api_key):
        super(WeatherForecast, self).__init__()
        self.api_key = api_key
        self.predict = Prediction()
        self.result = []

    def forecast(self, location, country):
        # Download the forecast JSON for the given location.
        url = (WeatherForecast.url_base + self.api_key
               + WeatherForecast.url_service["forecast"]
               + country + "/" + location + ".json")
        f = requests.get(url)
        data_json = json.loads(f.text)
        return self.process_data(data_json["forecast"])

    def process_data(self, data_json):
        for values in data_json["simpleforecast"]["forecastday"]:
            listatemp = []
            listatemp.append(values["date"]["weekday"])
            listatemp.append("In the day: "
                             + self.predict.get_predict_forecast(values["high"]["celsius"]))
            listatemp.append("In the night: "
                             + self.predict.get_predict_forecast(values["low"]["celsius"]))
            self.result.append(listatemp)
        return self.result
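# Illustration (not from the original source): a minimal sketch of driving the
# class above, assuming a hypothetical API key and that the project's
# Prediction helper, requests, and json are importable as the class expects.
forecaster = WeatherForecast("YOUR_API_KEY")  # hypothetical key
for day_forecast in forecaster.forecast("Madrid", "ES"):  # hypothetical location
    print(day_forecast)  # e.g. ['Monday', 'In the day: ...', 'In the night: ...']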
def test_basic_intersect(self):
    prediction1 = Prediction({"p1": .5, "p2": .5})
    prediction2 = Prediction({"p3": .5, "p4": .5})
    intersect_pred = prediction1.intersect(prediction2, None, DummyPredGetter())
    self.assertEqual(sum(intersect_pred.values()), 1)
    self.assertTrue(all(val == .25 for val in intersect_pred.values()))
def createOutputDataOfAlgorithm(filename, name_columns_of_valid_features):
    predict_algor = Prediction(filename, name_columns_of_valid_features, all_features)
    text_algor = predict_algor.getTextPredict()
    predict_proba_algor = predict_algor.getPredictProba()
    return predict_algor, text_algor, predict_proba_algor
def main(argv):
    if argv[1] == 'train_process':
        get_data()
        data_preparation = DataPreparation()
        data_preparation.generate_data_for_model()
        train_model = Train()
        train_model.compute_locations_models()
    prediction = Prediction()
    prediction.get_models()
    create_dashboard(prediction)
def predict():
    model = request.args.get('model')
    text = request.args.get('text')
    if model == "sentiment":
        prediction = Prediction(sentiment_clf, model, text)
    elif model == "sarcasm":
        prediction = Prediction(sarcasm_clf, model, text)
    else:
        # Bind prediction so the render call below cannot raise a NameError.
        prediction = None
        print("ERROR: {} model not recognized".format(model))
    return render_template('prediction.html', prediction=prediction)
def predict(self):
    sat = str(self.tbChooseSatellite.currentText())
    tle = self.tles[sat]
    with open("temp_tle.txt", "w") as f:
        f.write(tle + "\n")
    location = str(self.tbLocation.itemData(self.tbLocation.currentIndex()))
    satellite = Satellite(sat, tle, location)
    self.prediction = Prediction(satellite)
    self.prediction.show()
def _accuracy(self, data):
    '''
    Returns the accuracy of the network on the given PairedData.
    -- data: PairedData object
    '''
    N_GUESSES = 3  # the number of guesses we make per example
    output = self.output_with_activation(data.X)
    prediction = Prediction(output)
    P = prediction.most_likely(N_GUESSES)
    Y = data.Y
    return np.mean(np.sum(np.multiply(P, Y), axis=0))
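# Illustration (not from the original source): how the masked top-N accuracy
# above works on toy arrays, assuming P is a 0/1 indicator of the N_GUESSES
# most likely classes per column and Y is a one-hot label matrix.
import numpy as np

P = np.array([[1, 0],   # guessed classes, one column per example
              [1, 1],
              [0, 1]])
Y = np.array([[0, 0],   # one-hot true labels
              [1, 0],
              [0, 1]])
# An example counts as correct if its true class is among the guesses.
print(np.mean(np.sum(np.multiply(P, Y), axis=0)))  # -> 1.0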
def readImage():
    predictions, probabilities = prediction.predictImage(
        os.path.join(execution_path, "cat.jpg"), result_count=5)
    predictionList = []
    for eachPrediction, eachProbability in zip(predictions, probabilities):
        predictionObj = Prediction(eachPrediction, eachProbability)
        predictionObj.info()
        predictionList.append(predictionObj)
    return predictionList
def main_function():
    if list(request.form.values())[0] == 'AxisBank':
        pr = Prediction(['AxisBank', ["#AxisBank", "#axisbank", "@axisBank", "@RBI"]])
        # Run the prediction once and reuse the result.
        final_prediction = pr.get_final_prediction()
        output = 'Rs. {}'.format(round(final_prediction[0], 2))
    else:
        output = 'Not Available'
    return render_template('index.html',
                           prediction_text='Forecasted value is {}'.format(output))
def __init__(self, token):
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO)
    self.logger = logging.getLogger("log")
    self.bot = Bot(token)
    self.updater = Updater(token)
    self.dispatcher = self.updater.dispatcher

    start_handler = CommandHandler('start', self.start)
    self.dispatcher.add_handler(start_handler)
    info_handler = CommandHandler('info', self.info)
    self.dispatcher.add_handler(info_handler)
    feedback_handler = CommandHandler('feedback', self.feedback)
    self.dispatcher.add_handler(feedback_handler)
    insertion_handler = CommandHandler('insert', self.insert)
    self.dispatcher.add_handler(insertion_handler)
    self.dispatcher.add_handler(CallbackQueryHandler(self.button_clicked))
    text_message_handler = MessageHandler(Filters.text, self.text_message)
    self.dispatcher.add_handler(text_message_handler)
    document_message_handler = MessageHandler(Filters.document, self.document_message)
    self.dispatcher.add_handler(document_message_handler)
    self.dispatcher.add_error_handler(self.error)

    self.ner = Ner_Babel()
    self.prediction = Prediction()
    self.kb_interface = KB_interface()
    self.text_processing = text_processing()

    # This dictionary holds sub-dictionaries named after the methods that
    # require a button press from the user; each sub-dictionary carries the
    # variables needed to handle that action. The button's callback_query
    # then identifies the state and the button pressed.
    self.global_variables = {
        "last_question": "",
        "last_answer": "",
        "main_entity": "",
        "related_entity": "",
        "ambiguos_entities": [],
        "sugests_topics": {"buttons": []},
        "ask_for_answer_evaluation": {"buttons": ["P", "N"]},
        "sugests_question": {"buttons": []},
    }
def main():
    preprocess = Preprocess(data_file, nrows)
    taxi_summary, L, A, T, p_pick, p_tran, r, t_drive, t_wait = preprocess.preprocess_data()
    print("\n\nFeature Generation Completed .....")
    print("\n\n ---- Top 10 rows ---- \n\n", taxi_summary.head(10))
    prediction = Prediction()
    prediction.MDP_Dynamic_Program(L, A, T, p_pick, p_tran, r, t_drive, t_wait)
    print("\n\nStarting Revenue Prediction .....")
    prediction.predict_revenue(taxi_summary)
class MainWindow(QtWidgets.QMainWindow, Ui_MainWindow):

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        self.setupUi(self)
        self.database = Database()
        self.tles = self.database.get_tles()
        self.fill_satellite_list()
        self.fill_location_list()
        self.tbTle.setText("https://www.celestrak.com/NORAD/elements/noaa.txt")
        self.btnLoadTle.clicked.connect(self.load_tles)
        self.btnPredict.clicked.connect(self.predict)
        self.btnAddLocation.clicked.connect(self.open_location_chooser)
        self.location_chooser = Location(self.database, self)

    def load_tles(self):
        self.database.insert_tles(self.tbTle.text())
        self.tles = self.database.get_tles()
        self.fill_satellite_list()

    def fill_satellite_list(self):
        self.tbChooseSatellite.clear()
        self.tbChooseSatellite.addItems(list(self.tles.keys()))

    def fill_location_list(self):
        # Each location row is (id, name, coordinates).
        for loc in self.database.get_locations():
            self.tbLocation.addItem(loc[1], loc[2])

    def predict(self):
        sat = str(self.tbChooseSatellite.currentText())
        tle = self.tles[sat]
        with open("temp_tle.txt", "w") as f:
            f.write(tle + "\n")
        location = str(self.tbLocation.itemData(self.tbLocation.currentIndex()))
        satellite = Satellite(sat, tle, location)
        self.prediction = Prediction(satellite)
        self.prediction.show()

    def open_location_chooser(self):
        self.location_chooser.show()
def processOpenPoseJsonData(self, personObject):
    '''Callback run each time a new JSON of position joints is received.'''
    rospy.loginfo('[OPENPOSE_JSON] received data:')
    # Preprocess stream
    h = self.currentImage.shape[0]
    w = self.currentImage.shape[1]
    json_positions = []
    # Rebuild a python dict, adding the image size for each person.
    for i in range(len(personObject.persons)):
        bodyParts = []
        for j in range(len(personObject.persons[i].body_part)):
            bodyParts.append({
                'x': personObject.persons[i].body_part[j].x,
                'y': personObject.persons[i].body_part[j].y,
                'confidence': personObject.persons[i].body_part[j].confidence
            })
        json_positions.append({
            "body_part": bodyParts,
            "face_landmark": personObject.persons[i].face_landmark,
            "image_size": {"width": w, "height": h}
        })
    # Predict classes (the earlier redundant predict() call is dropped:
    # its result was immediately overwritten by predictClasses()).
    predictionObject = Prediction()
    data = predictionObject.preprocess(json_positions)
    predictions = predictionObject.predictClasses(data)
    for i in range(len(predictions)):
        print('Person %s' % str(i + 1) + ' - ' + predictionObject.LABEL[predictions[i][0]])
        self.postProcess(json_positions[i], int(predictions[i]))
    ros_msg_image = self._bridge.cv2_to_imgmsg(self.currentImage, 'bgr8')
    self._output_image_pub.publish(ros_msg_image)
def run(self, verbose):
    # Training
    predictor = Prediction(self.features, self.targets, self.window_size,
                           self.nef, self.max_experts)
    for row_index in range(self.window_size, len(self.features)):
        # Make a prediction (including risk management).
        pred = predictor.makePrediction(row_index, verbose)
        # TODO: update profit
        # TODO: update stats
def from_nextbus(cls, agency_tag, route_tag, direction_tag, stop_tag):
    logging.info("from_nextbus@Estimation...")
    etree = url.fetch_nextbus_url({
        "a": agency_tag,
        "command": "predictions",
        "s": stop_tag,
        "r": route_tag.replace("__", " ")})
    prediction_keys = [prediction.key for prediction in
                       Prediction.from_nextbus(etree, agency_tag, route_tag,
                                               direction_tag, stop_tag)]
    estimations = []
    for elem in etree.findall("predictions"):
        route_tag = elem.get("routeTag")
        stop_tag = elem.get("stopTag")
        agency_title = elem.get("agencyTitle")
        route_title = elem.get("routeTitle")
        stop_title = elem.get("stopTitle")
        key = "%s@%s@%s@%s" % (agency_tag, route_tag, direction_tag, stop_tag)
        e = Estimation(id=key,
                       route_tag=route_tag,
                       stop_tag=stop_tag,
                       agency_title=agency_title,
                       route_title=route_title,
                       stop_title=stop_title,
                       predictions=prediction_keys)
        estimations.append(e)
    try:
        ndb.put_multi(estimations)
    except CapabilityDisabledError:
        # Fail gracefully if the datastore is unavailable.
        pass
    return estimations
def compute_accuracy(predicted_classes, labels, threshold, distance,
                     source_naming_list, target_indices, match_index,
                     target_naming_list):
    """Compute the accuracy for classifying the source items as elements of targets."""
    predictions = []
    # Reject matches whose distance exceeds the threshold.
    predicted_classes[np.squeeze(distance) > threshold] = -1
    correct = np.equal(predicted_classes, labels)
    try:
        accuracy = sum(correct) / len(correct)
    except ZeroDivisionError:
        accuracy = 0.0
    correct_targets = predicted_classes[target_indices] == labels[target_indices]
    try:
        targets_accuracy = sum(correct_targets) / len(correct_targets)
    except ZeroDivisionError:
        targets_accuracy = 0.0
    for j in range(len(source_naming_list)):
        tar_name = (target_naming_list[match_index[j][0]].split('/')[-1]
                    if predicted_classes[j] != -1 else '-1_')
        dist = distance[j] if predicted_classes[j] != -1 else -1
        if dist is None:
            raise ValueError("Invalid distance")
        new_prediction = Prediction(source_naming_list[j].split('/')[-1], dist,
                                    0.0, correct[j], target_indices[j], tar_name)
        predictions.append(new_prediction)
    return accuracy, targets_accuracy, predictions
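# Illustration (not from the original source): the thresholding step above on
# toy arrays; predictions farther than the threshold are rejected as class -1.
import numpy as np

predicted_classes = np.array([2, 0, 1])
distance = np.array([[0.1], [0.9], [0.3]])  # column vector, hence the squeeze
predicted_classes[np.squeeze(distance) > 0.5] = -1
print(predicted_classes)  # -> [ 2 -1  1]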
def predictAllSeriesCombined(self, instance):
    y_true, y_pred = [], []
    for time_series_idx in range(instance.n_time_series):
        istsv = instance.getViewForTimeSeries(time_series_idx, self.time_sampler)
        y_true.extend(np.asarray(istsv.y).tolist())
        y_pred.extend(np.asarray(self.correlation_metric(istsv).flatten()).tolist())
    return Prediction(y_true, y_pred, self)
def __init__(self, name, costPerUnit, materialNames, materialQtys, productionTime):
    self.productId = self.__class__.globalProductId
    self.__class__.globalProductId += 1
    self.name = name
    self.costPerUnit = costPerUnit
    self.rawMatNames = materialNames
    self.rawMatQtys = materialQtys
    self.productionTime = productionTime
    # self.salesData = SalesData(costPerUnit)
    self.prediction = Prediction()
def make_prediction(fixture):
    home_team = Team(fixture['homeTeam']['team_id'],
                     fixture['homeTeam']['team_name'],
                     fixture['homeTeam']['logo'])
    away_team = Team(fixture['awayTeam']['team_id'],
                     fixture['awayTeam']['team_name'],
                     fixture['awayTeam']['logo'])
    home_goals = predict_goals(home_team.home_atk, away_team.away_def,
                               league_averages['home_goals'])
    away_goals = predict_goals(away_team.away_atk, home_team.home_def,
                               league_averages['away_goals'])
    pred_home_goals, home_poisson = expected_value(home_goals)
    pred_away_goals, away_poisson = expected_value(away_goals)
    kickoff = datetime.strptime(fixture['event_date'], "%Y-%m-%dT%H:%M:%S+00:00")
    db_connector.post_prediction_to_db(home_team, away_team, pred_home_goals,
                                       pred_away_goals, home_poisson,
                                       away_poisson, kickoff)
    return Prediction(home_team, away_team, pred_home_goals, pred_away_goals,
                      home_poisson, away_poisson, kickoff)
def predict(self, x, y_test=None):
    """Score the model: return predicted values and, if labels are given, the MSE."""
    logging.debug(self.predict.__name__)
    logging.debug('self.params shape: {}'.format(self.params.shape))
    values = x.dot(self.params)
    # mean_squared_error takes (y_true, y_pred); MSE is symmetric, but the
    # conventional argument order is used here.
    mse = mean_squared_error(y_test, values) if y_test is not None else None
    return Prediction(values, mse)
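# Illustration (not from the original source): the core of the score step
# above on toy data, assuming params holds fitted coefficients; x.dot(params)
# gives the predictions and the MSE compares them to held-out labels.
import numpy as np
from sklearn.metrics import mean_squared_error

x = np.array([[1.0, 2.0], [1.0, 3.0]])  # design matrix (bias column + feature)
params = np.array([0.5, 1.5])           # fitted coefficients
values = x.dot(params)                  # -> [3.5, 5.0]
print(mean_squared_error(np.array([3.0, 5.0]), values))  # -> 0.125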
def getPredictionList(self, predictionsTag):
    self.predictionList = []
    for directionTag in predictionsTag.findall('direction'):
        for predictionTag in directionTag.findall('prediction'):
            # Tag each prediction with the title of its direction.
            predictionTag.attrib['direction'] = directionTag.attrib['title']
            self.predictionList.append(Prediction(predictionTag.attrib))
def _crop_image_helper(im, url, website, box, dest_folder, count, name="0"):
    try:
        count_value = count[0]
        a = im.crop(box)
        count_value = count_value + 1
        file_name = "sliced-IMG-{0}-{1}.png".format(name, count_value)
        file_path = os.path.join(dest_folder, file_name)
        a.save(file_path)
        predict = Prediction(url, file_path, file_name, website, True)
        count[0] = count_value
        # Stop slicing once the prediction is confident enough or the
        # slice budget is exhausted.
        if predict.predict() >= settings.stoping_threshold or count_value >= 50:
            return False
        return True
    except Exception as e:
        logging.exception(e.__str__())
        return True
def begin_prediction(self):
    try:
        user, mode = self.get_active_user(), self.get_active_mode()
        password = self.get_user_password(user, PasswordTypes(mode))
    except configparser.NoOptionError:
        print('Cannot begin prediction, password not set! User: {0}, Mode: {1}'
              .format(user, mode))
        exit(2)
    print('You are ready for the prediction {0}.\n'
          'In this session you will simply "login" by entering the password '
          'you set earlier.'.format(user))
    muse = self.start_stream()
    input('\nPress any key to begin...')
    prediction = Prediction(user, mode, password)
    prediction.start()
    self.stop_stream(muse)
def display_page(pathname):
    if pathname == '/dash-board':
        return App()
    elif pathname == '/prediction':
        return Prediction()
    elif pathname == '/report':
        return Report()
    else:
        return Homepage()
def predictSingleSeries(self, instance, time_series_idx):
    '''
    :param instance: an object of type Instance
    :param time_series_idx: which time series to use for prediction
    :return: a Prediction object
    '''
    istsv = instance.getViewForTimeSeries(time_series_idx, self.time_sampler)
    y_pred = self.correlation_metric(istsv)
    return Prediction(istsv.y, y_pred, self)
def train():
    if request.method == 'POST':
        try:
            intents_file = upload_file(request)
        except Exception as e:
            return "Error: {}".format(e)
    if request.method == 'GET':
        input_file = request.args.get('file')
        intents_file = input_file if input_file else "intents.json"
    ml = Train(intents_file, MODEL_DIR)
    try:
        ml.training()
        ml_prediction = Prediction(MODEL_DIR)
        ml_prediction.load_model()
        return "Train is completed"
    except Exception as e:
        return "Training failed: {}".format(str(e))
def find_common_lhs_part(self, seq_index, lhs, predictions):
    # Walk the rules for this sequence from best to worst score.
    for rule in sorted(self.simulator.rules_sets[seq_index],
                       key=lambda r: r.get_rule_score(), reverse=True):
        if rule.lhs == lhs:
            if rule.rhs in predictions:
                predictions[rule.rhs].append(
                    Prediction(rule.rhs, lhs, rule,
                               predictions[rule.rhs][-1].number_of_occurrences + 1))
            else:
                predictions[rule.rhs] = [Prediction(rule.rhs, lhs, rule, 1)]
            # print("Adding to predictions", lhs, "==>", rule.rhs,
            #       "nr of rules supporting:",
            #       predictions[rule.rhs][-1].number_of_occurrences,
            #       "because of rule:\n", rule)
            break
def __fake_predictions_generation(self) -> None:
    # Insert one fake prediction per hour covering roughly the last 360 days.
    for hour in range(24 * 360):
        db_product = Prediction(
            city_id=0,
            timestamp=datetime.datetime.now()
            - datetime.timedelta(hours=24 * 360)
            + datetime.timedelta(hours=hour),
            prediction=random.randint(1000, 3000),
        )
        self.__database.add(db_product)
        self.__database.commit()
        self.__database.refresh(db_product)
def predict():
    log = Log()
    msg = __name__ + '.' + utils.get_function_caller() + ' -> enter'
    log.print(msg)

    # Get the request data.
    json_data = request.get_json(force=True)
    msg = 'json_data: {}'.format(json_data)
    log.print(msg)
    user_id = json_data['user_id']
    random_state = 42
    input_df = pd.json_normalize(json_data)

    # Save json_data and input_df under unique names for debugging.
    json_data_unique_filename = config.PATH_TO_DATASET + utils.get_unique_filename('json_data.json')
    input_df_unique_filename = config.PATH_TO_DATASET + utils.get_unique_filename('input_df.csv')
    with open(json_data_unique_filename, 'w') as outfile:
        json.dump(json_data, outfile)
    input_df = input_df[list(config.ACCEPTABLE_COLUMNS)]
    input_df.to_csv(input_df_unique_filename, index=False)

    prediction = Prediction(user_id, input_df_unique_filename, random_state)
    predictions, labels = prediction.predict()
    result = {'prediction': int(predictions[0]), 'label': str(labels[0])}
    # dummy result for testing:
    # result = {'prediction': 1, 'label': 'Good Loan'}
    output = result

    msg = __name__ + '.' + utils.get_function_caller() + ' -> exit'
    log.print(msg)
    return jsonify(results=output)
def predict(self, instance):
    '''
    :param instance: an object of type Instance
    :return: a Prediction object
    '''
    y_predictions = []
    for time_series_idx in range(instance.n_time_series):
        istsv = instance.getViewForTimeSeries(time_series_idx, self.time_sampler)
        y_predictions.append(self.correlation_metric(istsv))
    y_pred = self.combiner(y_predictions)
    return Prediction(instance.graph_adjecency_matrix, y_pred, self)
def webhook():
    # Declare the name global so the lazy initialization below persists
    # across requests (otherwise the assignment would create a local).
    global ml_prediction
    try:
        ml_prediction
    except NameError:
        ml_prediction = Prediction(MODEL_DIR)
        ml_prediction.load_model()
    answer = ''
    if request.method == 'POST':
        try:
            data = json.loads(request.data)
            bot_id = data['recipient']['id']
            bot_name = data['recipient']['name']
            recipient = data['from']
            service = data['serviceUrl']
            sender = data['conversation']['id']
            text = data['text']
            bot.send_message(bot_id, bot_name, recipient, service, sender,
                             ml_prediction.response(text, sender))
        except Exception as e:
            print(e)
    if request.method == 'GET':
        question = request.args.get('q')
        answer = ml_prediction.response(question if question else 'Hi')
    return 'Code: 200. {}'.format(answer)
def infer(args):
    with open(args.classifierModel, 'rb') as f:
        if sys.version_info[0] < 3:
            (le, clf) = pickle.load(f)
        else:
            (le, clf) = pickle.load(f, encoding='latin1')

    preds = []
    fold = args.imgs[0]
    all_imgs = [
        os.path.join(fold, file) for file in os.listdir(fold)
        if os.path.isfile(os.path.join(fold, file))
    ]
    for img in all_imgs:
        print("\n=== {} ===".format(img))
        reps = getRep(img)
        if len(reps) > 1:
            print("List of faces in image from left to right")
        for r in reps:
            rep = r.reshape(1, -1)
            start = time.time()
            predictions = clf.predict_proba(rep).ravel()
            maxI = np.argmax(predictions)
            person = le.inverse_transform(maxI)
            target_person = os.path.basename(img).split('_')[0]
            confidence = predictions[maxI]
            preds += [
                Prediction(
                    filename=img,
                    # Compare as integers; the class labels are numeric.
                    correct=(int(target_person) == int(person)),
                    true_class=target_person,
                    pred_class=int(person),
                    pred_yaw=0,           # TODO
                    pred_pitch=0,         # TODO
                    pred_roll=0,          # TODO
                    pred_pose=0,          # TODO: viewpoint
                    pred_illumination=0,  # TODO
                    pred_expression=0,    # TODO
                    distance=None,
                    confidence=confidence)
            ]
            if args.verbose:
                print("Prediction took {} seconds.".format(time.time() - start))
            print("Predict {} with {:.2f} confidence.".format(person.decode('utf-8'), confidence))
            if isinstance(clf, GMM):
                dist = np.linalg.norm(rep - clf.means_[maxI])
                print(" + Distance from the mean: {}".format(dist))
    write_to_csv(preds, "openface")
def main(): """ main path of execution """ # parse arguments args = parseArguments() # locate jpeg training images files = glob.glob(os.path.join(args.data_path, 'images/*.jpg')) if len(files) > 0: # open first image to extract projection ds = gdal.Open(files[0]) if ds is not None: # create prediction object obj = Prediction(args.model_pathname, args.out_pathname, ds.GetProjection()) # compute inference and add detections to output shape file for f in files: obj.process(f, writeback=False) # read shapefile gdf = gpd.read_file(args.out_pathname) # locate isolated features bad_rows = [] for idx, row in gdf.iterrows(): if sum(gdf['geometry'].buffer(100).intersects(row['geometry'])) < 5: bad_rows.append(idx) # drop isolated features gdf = gdf.drop(bad_rows) gdf.to_file(args.out_pathname.replace('.shp', '-update.shp')) return
def main(data_to_predict_json):
    print('### Delivery Time Prediction App ###')
    pred = Prediction(data_to_predict_json)
    pred.predict()
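# Illustration (not from the original source): a hypothetical invocation of
# the main() above; the JSON field names are invented for the sketch.
if __name__ == '__main__':
    sample = '{"distance_km": 4.2, "restaurant_id": 17}'  # hypothetical payload
    main(sample)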