import datetime
import json

import numpy as np
import pandas as pd
from flask import jsonify, request
# Point/Polygon usage below matches shapely's API; shapely is assumed here.
from shapely.geometry import Point, Polygon
# PointProcess, Cluster, shrink_data, close_assignment, and
# HistogramProcessingFnc are project-local modules assumed importable.


def wasserstein_cluster(trucks, interval_time, interval_count, start_time, polygon):
    # Reduce raw truck records to the columns needed for clustering.
    move_data = shrink_data(trucks)
    # Predicted event intensities on a grid, one projection per interval.
    grid_loc = PointProcess.locs_for_wasserstein(
        start_time=start_time, num_projections=interval_count, top_percent=60)
    # Zero out the intensity of every grid point outside the service polygon.
    coordinates = np.array(polygon['coordinates'])
    polygon = Polygon(coordinates)
    for i in range(len(grid_loc)):
        point = Point(grid_loc[i][0], grid_loc[i][1])
        if not polygon.contains(point):
            grid_loc[i][2] = 0
    end_time = start_time + datetime.timedelta(seconds=15 * 60 * interval_count)
    # Cluster the grid points against the current truck positions.
    cluster = Cluster(grid_loc, len(move_data))
    cluster.set_centers(move_data[:, 0:2], len(move_data))
    cluster.cluster_assignment()
    # Expected coverage before relearning the intensity weights.
    current = cluster.get_expected()
    lam = cluster.learn_lam(1, False)
    centers = cluster.get_centers()
    # Expected coverage after relearning.
    expected = cluster.get_expected()
    # Snap each computed center to the nearest actual truck position.
    centers = close_assignment(centers, move_data)
    return centers, expected, current
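
# A standalone sketch of the point-in-polygon filtering used above, with
# made-up coordinates; `grid` mimics the [lon, lat, intensity] rows of
# grid_loc. Everything in this demo is hypothetical.
def _polygon_filter_demo():
    demo_poly = Polygon([(-86.3, 39.6), (-86.0, 39.6), (-86.0, 39.9), (-86.3, 39.9)])
    grid = [[-86.2, 39.7, 1.5], [-85.5, 39.7, 2.0]]
    for row in grid:
        if not demo_poly.contains(Point(row[0], row[1])):
            row[2] = 0  # the second point lies outside the box, so it is zeroed
    return grid  # [[-86.2, 39.7, 1.5], [-85.5, 39.7, 0]]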
def __init__(self, windowSize, distanceLimit, lineThinknessPx):
    self.windowSize = windowSize
    # Examiner that accepts a window as part of a line only when the pixel
    # correlation is at least 0.7.
    self.lineEximiner = PointProcess.LiniarityExaminer(
        inferiorCorrLimit=0.7, lineThinkness=lineThinknessPx)
    self.distanceLimit = distanceLimit
    # Upper and lower bounds on the number of non-zero pixels a valid
    # window may contain.
    self.supLimitNrNonZero = np.max(windowSize) * lineThinknessPx * 2.5
    self.infLimitNrNonZero = np.min(windowSize) * lineThinknessPx * 0.3
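
# Worked example of the non-zero-pixel bounds above, with hypothetical
# parameters: a 40x30 px window and 10 px line thickness are accepted only
# when the non-zero pixel count falls between 90 and 1000.
def _nonzero_limits_demo(windowSize=(40, 30), lineThinknessPx=10):
    sup = np.max(windowSize) * lineThinknessPx * 2.5  # 1000.0
    inf = np.min(windowSize) * lineThinknessPx * 0.3  # 90.0
    return inf, sup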
def ProcessUpdate(name):
    fields = [
        'XCOORD', 'YCOORD', 'CALL_TYPE_FINAL_D', 'CALL_TYPE_FINAL', 'DATE_TIME'
    ]
    # Read only the needed columns, parse the timestamps, and sort
    # chronologically before handing the frame to the point process.
    data = pd.read_csv(name, usecols=fields)
    data['DATE_TIME'] = pd.to_datetime(data['DATE_TIME'], format='%Y-%m-%d %H:%M:%S')
    data = data.sort_values(by='DATE_TIME')
    msg = PointProcess.update_from_new_inputs(data)
    return msg
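
# Standalone sketch of the same ingestion pipeline on an in-memory CSV, so
# the parse-then-sort behavior can be checked without a real export file.
# The two rows and their call-type values are made up.
def _ingest_demo():
    import io
    csv = io.StringIO(
        'XCOORD,YCOORD,CALL_TYPE_FINAL_D,CALL_TYPE_FINAL,DATE_TIME\n'
        '-86.43,39.14,FIRE,F,2018-07-30 13:59:22\n'
        '-86.40,39.10,EMS,E,2018-07-30 12:30:00\n')
    df = pd.read_csv(csv, usecols=['XCOORD', 'YCOORD', 'DATE_TIME'])
    df['DATE_TIME'] = pd.to_datetime(df['DATE_TIME'], format='%Y-%m-%d %H:%M:%S')
    return df.sort_values(by='DATE_TIME')  # the EMS row sorts first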
def __init__(self, nrSlice, frameSize, pxpcm, windowSize):
    self.nrSlice = nrSlice
    self.windowSize = windowSize
    # Each slice keeps the full frame height and 1/nrSlice of the width.
    partSize = (frameSize[0], frameSize[1] // nrSlice)
    print('Window size:', windowSize, 'Part size:', partSize)
    self.histogramProc = HistogramProcessingFnc.HistogramProcessing(
        0.002777778, 0.043570226, lineThinkness=2 * 5,
        xDistanceLimit=windowSize[0] // 2, partSize=partSize)
    # Alternative myCpy (compiled) implementations, kept for reference:
    # self.histogramProc = myCpy.HistogramProcessing(
    #     0.009777778, 0.023570226, partSize[0], partSize[1], 2 * pxpcm, windowSize[0] // 2)
    # # 20, 0.002777778, 0.023570226, partSize[0], partSize[1], 2*5, partSize[1]//2
    # self.slicingMethod = myCpy.SlicingMethod(
    #     nrSlice, 0.002777778, 0.023570226, partSize[0], partSize[1], 2 * pxpcm, partSize[1] // 2)
    self.liniarityExaminer = PointProcess.LiniarityExaminer(
        inferiorCorrLimit=0.9, lineThinkness=2.2 * pxpcm)
    self.pointConnectivity = PointProcess.PointsConnectivity(windowSize)
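
# Worked example of the slice geometry above, with hypothetical numbers:
# a 480x640 frame split into 20 vertical slices yields parts of 480 rows
# by 32 columns each.
def _part_size_demo(frameSize=(480, 640), nrSlice=20):
    return (frameSize[0], frameSize[1] // nrSlice)  # (480, 32)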
def emergencies():
    start_time = request.args.get('start_time')
    interval_count = int(request.args.get('interval_count'))
    # Interval length in minutes; defaults to 15 when not supplied.
    if request.args.get('time_interval'):
        time_interval = int(request.args.get('time_interval'))
    else:
        time_interval = 15
    start_time = datetime.datetime.utcfromtimestamp(float(start_time))
    total_output = []
    predictions, times, increment = PointProcess.get_events_for_api(
        start_time=start_time, num_periods=interval_count,
        time_step=time_interval, top_percent=0)
    pred_max = 0
    im_save = {}  # highest-intensity prediction seen so far
    for j in range(interval_count):
        output = {
            'start': times[j],
            'interval_length': increment,
            'emergencies': []
        }
        for i in range(len(predictions[0])):
            output['emergencies'].append({
                'intensity': predictions[j][i][2],
                'location': {
                    'lat': predictions[j][i][0],
                    'long': predictions[j][i][1]
                }
            })
            # Track the single most intense prediction across all intervals.
            if predictions[j][i][2] > pred_max:
                pred_max = predictions[j][i][2]
                im_save = {
                    'intensity': predictions[j][i][2],
                    'location': {
                        'lat': predictions[j][i][0],
                        'long': predictions[j][i][1]
                    }
                }
        total_output.append(output)
        print(len(output['emergencies']))
    # Persist the peak prediction for debugging.
    with open('GET.json', 'w') as fp:
        json.dump(im_save, fp)
    return jsonify(total_output)
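
# Hypothetical client call for the endpoint above, assuming it is registered
# at /emergencies on a local dev server; `requests` is an added dependency
# and all parameter values are placeholders.
def _emergencies_client_demo():
    import requests
    resp = requests.get('http://127.0.0.1:5000/emergencies',
                        params={'start_time': 1532959162,
                                'interval_count': 4,
                                'time_interval': 15})
    for interval in resp.json():
        print(interval['start'], len(interval['emergencies']))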
def SingleProcessUpdate():
    '''
    http://127.0.0.1:5000/SingleProcessUpdate?xcoord=-86.43&ycoord=39.14&timestamp=1532959162

    The query string should have the form: xcoord, ycoord, unix timestamp.
    '''
    xcoord = [float(request.args.get('xcoord'))]
    ycoord = [float(request.args.get('ycoord'))]
    time = request.args.get('timestamp')
    # Convert the unix timestamp to a datetime and wrap everything in a
    # one-row frame for the point process update.
    time_stamp = [datetime.datetime.utcfromtimestamp(float(time))]
    update_df = pd.DataFrame({'XCOORD': xcoord, 'YCOORD': ycoord, 'DATE_TIME': time_stamp})
    msg = PointProcess.update_from_new_inputs(update_df)
    return msg
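
# Smoke test for SingleProcessUpdate using the URL form from its docstring
# (server assumed to be running locally; `requests` is an added dependency).
def _single_update_demo():
    import requests
    return requests.get('http://127.0.0.1:5000/SingleProcessUpdate',
                        params={'xcoord': -86.43, 'ycoord': 39.14,
                                'timestamp': 1532959162}).text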
def wasserstein_cluster(trucks, interval_time, interval_count, start_time):
    # Variant of wasserstein_cluster without a bounding polygon: cluster the
    # predicted grid against the truck positions and return only the centers.
    move_data = shrink_data(trucks)
    grid_loc = PointProcess.locs_for_wasserstein(
        start_time=start_time, num_projections=interval_count, top_percent=60)
    end_time = start_time + datetime.timedelta(seconds=15 * 60 * interval_count)
    cluster = Cluster(grid_loc, len(move_data))
    cluster.set_centers(move_data[:, 0:2], len(move_data))
    lam = cluster.learn_lam(1, False)
    centers = cluster.get_centers()
    # Snap each computed center to the nearest actual truck position.
    centers = close_assignment(centers, move_data)
    return centers
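
# Hypothetical call of the polygon-free variant; `trucks` must be whatever
# shrink_data expects (assumed here to reduce to an (n, >=2) array of
# lon/lat truck positions), and the start time is a placeholder.
def _wasserstein_demo(trucks):
    start = datetime.datetime(2018, 7, 30, 12, 0)
    return wasserstein_cluster(trucks, interval_time=15, interval_count=4,
                               start_time=start)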