def lost_surplus(df, df_2018, hour=8, plot=False):
    """Estimate the surplus lost between the 2019 and 2018 market equilibria for one hour.

    Parameters
    ----------
    df : pandas.DataFrame
        Bids for the 2019 day; needs "start_date"/"end_date" datetime columns
        (schema otherwise defined by get_hour_load_curve/get_generator_bids —
        presumably cumulative-MW bid curves; confirm against those helpers).
    df_2018 : pandas.DataFrame
        Bids for the comparison 2018 day, same schema.
    hour : int
        Hour of day to evaluate (default 8).
    plot : bool
        If True, also draw the curves via plot_results.

    Returns
    -------
    (surplus, (x_2019, y_2019), (x_2018, y_2018)) — the signed surplus area and
    the two supply/demand intersection points (MW, price).
    """
    # Anchor both datasets at the same clock hour of their respective days.
    date = df["start_date"].iloc[0]
    eight_hour_datetime = datetime.datetime(date.year, date.month, date.day, hour)
    date_2018 = df_2018["start_date"].iloc[0]
    eight_hour_datetime_2018 = datetime.datetime(date_2018.year, date_2018.month, date_2018.day, hour)
    # Keep only bids whose validity interval fully covers [hour, hour+1).
    bids_for_eight = df[(df["start_date"] <= eight_hour_datetime) & (df["end_date"] >= eight_hour_datetime + datetime.timedelta(hours=1))]
    bids_for_eight_2018 = df_2018[(df_2018["start_date"] <= eight_hour_datetime_2018) & (df_2018["end_date"] >= eight_hour_datetime_2018 + datetime.timedelta(hours=1))]
    load_bids = get_hour_load_curve(bids_for_eight)
    load_bids_2018 = get_hour_load_curve(bids_for_eight_2018)
    # NOTE(review): the 2019 generator (supply) curve is used against BOTH
    # load curves below — presumably intentional (same supply, shifted demand);
    # confirm.
    generator_bids = get_generator_bids(bids_for_eight)
    x1 = load_bids["cumsum_MW"].values
    y1 = load_bids["bid_price"].values
    x3 = load_bids_2018["cumsum_MW"].values
    y3 = load_bids_2018["bid_price"].values
    x2 = generator_bids["cumsum_MW"].values
    y2 = generator_bids["bid_price"].values
    # Market equilibria: where each demand curve crosses the supply curve.
    x_2019, y_2019 = intersect.intersection(x1, y1, x2, y2)
    x_2018, y_2018 = intersect.intersection(x3, y3, x2, y2)
    if plot:
        plot_results(eight_hour_datetime, generator_bids, load_bids, load_bids_2018, x_2019, y_2019, x_2018, y_2018)
    # Restrict the supply curve to the MW span between the two equilibria;
    # y_1 is the flat clearing price of the nearer equilibrium, y_2 the
    # supply price along the span.
    if x_2019[0] <= x_2018[0]:
        x = generator_bids["cumsum_MW"][(generator_bids["cumsum_MW"] >= x_2019[0]) & (generator_bids["cumsum_MW"] <= x_2018[0])]
        y_1 = [y_2019[0]] * len(x)
        y_2 = generator_bids["bid_price"][(generator_bids["cumsum_MW"] >= x_2019[0]) & (generator_bids["cumsum_MW"] <= x_2018[0])]
    else:
        x = generator_bids["cumsum_MW"][(generator_bids["cumsum_MW"] <= x_2019[0]) & (generator_bids["cumsum_MW"] >= x_2018[0])]
        y_1 = [y_2018[0]] * len(x)
        y_2 = generator_bids["bid_price"][(generator_bids["cumsum_MW"] <= x_2019[0]) & (generator_bids["cumsum_MW"] >= x_2018[0])]
    # Rectangle-rule integration of (supply price - clearing price) over MW.
    calculus_df = pd.DataFrame()
    calculus_df["x"] = x
    calculus_df["diffy"] = y_2 - y_1
    calculus_df["dx"] = calculus_df["x"] - calculus_df["x"].shift(1)
    surplus = np.nansum(calculus_df["dx"] * calculus_df["diffy"])
    # Sign convention: negative when the 2019 equilibrium sits at or beyond
    # the 2018 one — TODO confirm intended semantics at exact equality.
    if x_2019[0] >= x_2018[0]:
        surplus = -surplus
    return surplus, (x_2019[0], y_2019[0]), (x_2018[0], y_2018[0])
def _getProjectedPoint(contour, center, point):
    """Project *point* onto *contour* along the direction toward *center*.

    A long straight segment through *point* (oriented by the unit vector
    from *point* to *center*) is intersected with the contour polyline;
    the crossing closest to *point* is returned as an (x, y) tuple.
    """
    direction = _normalize(center - point)
    # Parameter values spanning the segment, centered so it extends on
    # both sides of *point*; column vector so broadcasting yields Nx2 points.
    steps = np.asarray([np.arange(0, contour.size)]).T
    ray = (steps - contour.size / 2) * direction + point
    crossX, crossY = intersection(ray[:, 0], ray[:, 1], contour[:, 0], contour[:, 1])
    # Nearest crossing first; raises IndexError if the ray misses the contour.
    byDistance = sorted(zip(crossX, crossY), key=lambda c: np.linalg.norm(np.asarray(c) - point))
    return byDistance[0]
def check_for_intersection(pts, width, display=True):
    """Return True if the two offset curves of *pts* intersect each other
    or either one self-intersects.

    The polyline is offset by +/- half of *width*; if *display* is set,
    both offsets and the centerline are drawn first.
    """
    half_width = width / 2
    curve_pos = offset_curve(pts, half_width)
    curve_neg = offset_curve(pts, -half_width)
    if display:
        plt.figure(2)
        plt.plot(curve_pos[:, 0], curve_pos[:, 1], color='r')
        plt.plot(curve_neg[:, 0], curve_neg[:, 1], color='b')
        plt.plot(pts[:, 0], pts[:, 1], color='g')
        plt.show()
    cross_x, cross_y = intersection(curve_pos[:, 0], curve_pos[:, 1],
                                    curve_neg[:, 0], curve_neg[:, 1])
    if len(cross_x) > 0:
        return True
    # No mutual crossing — fall back to checking each offset against itself.
    return check_for_self_intersection(curve_pos) or check_for_self_intersection(curve_neg)
def test_basic():
    """Intersect a cycloid-like curve with a shifted sine and check both
    crossing points against known reference coordinates."""
    a, b = 1, 2
    phi = np.linspace(3, 10, 100)
    cycloid_x = a * phi - b * np.sin(phi)
    cycloid_y = a - b * np.cos(phi)
    sine_x = phi
    sine_y = np.sin(phi) + 2
    ix, iy = intersection(cycloid_x, cycloid_y, sine_x, sine_y)
    assert pytest.approx(ix) == np.array([6.10765984, 8.36483107])
    assert pytest.approx(iy) == np.array([1.82539714, 2.87208714])
def test_bug_overlapping_lines():
    """Regression test for overlapping/collinear segments.

    more info https://github.com/sukhbinder/intersection/issues/1
    """
    xa = [0., 0., 1., 1., 1., 2., 2., 2.]
    ya = [100., 25., 25., 25., 20., 20., 20., 0.]
    xb = [0., 0., 2., 2., 2., 4., 4., 4.]
    yb = [0., 10., 10., 10., 20., 20., 20., 100.]
    ix, iy = intersection(xa, ya, xb, yb)
    # Three crossings are expected along the shared x = 2 segment.
    assert pytest.approx(ix) == np.array([2., 2., 2.])
    assert pytest.approx(iy) == np.array([20., 10., 20.])
def intersect():
    """Intersect the distance circles of the first two seismic events and
    return the result as a JSON response.

    Responds with both intersection points on success, or with a 404
    status payload when the circles do not intersect.
    """
    first = seismic_events_list[0]
    second = seismic_events_list[1]
    p1 = first.coord
    r1 = first.distance_to_ep * 1000  # Meters
    p2 = second.coord
    r2 = second.distance_to_ep * 1000  # Meters
    inter1, inter2 = intersection(p1, r1, p2, r2)
    if inter1 is None or inter2 is None:
        return jsonify(status=404)
    return jsonify(status='success', p1=inter1, p2=inter2)
def optimal_stopping_point(
    best_dist,
    y_std_failing,
    y_failing,
    parameters_failing,
    y_std_passing,
    y_passing,
    parameters_passing,
):
    """
    Predict Optimal Stopping Point.

    This function takes the best_distribution, failing and passing
    distributions and parameters and returns an optimal stopping point
    for the test.

    Parameters
    ----------
    best_dist : str
        Name of a scipy.stats distribution (looked up via getattr).
    y_std_failing, y_std_passing : array-like
        Points at which each fitted PDF is evaluated.
    y_failing, y_passing : array-like
        x-coordinates of the failing/passing curves fed to intersection().
    parameters_failing, parameters_passing : sequence
        First three fitted distribution parameters for each curve.

    Returns
    -------
    float
        The largest x-coordinate where the two PDF curves intersect.

    Raises
    ------
    ValueError
        If the two curves do not intersect.
    """
    dist = getattr(scipy.stats, best_dist)
    # Obtain the intersection points between the distribution curves
    x, y = intersection(
        y_failing,
        dist.pdf(
            y_std_failing,
            parameters_failing[0],
            parameters_failing[1],
            parameters_failing[2],
        ),
        y_passing,
        dist.pdf(
            y_std_passing,
            parameters_passing[0],
            parameters_passing[1],
            parameters_passing[2],
        ),
    )
    # Guard the empty case explicitly: bare max() would raise a cryptic
    # "max() arg is an empty sequence" ValueError (same exception type,
    # clearer diagnostic).
    if len(x) == 0:
        raise ValueError(
            "no intersection found between the failing and passing "
            "distribution curves"
        )
    osp = max(x)
    return osp
def findSlot(users, date):
    """Find common free time slots for *users* on the requested date.

    Parameters
    ----------
    users : 
        Passed through to main() to fetch each user's appointments
        (exact type defined by main(); presumably a list of user ids —
        confirm against the caller).
    date : str
        Space-separated date: "day", "day month-name", or
        "day month-name year". Missing parts default to today's
        month/year.

    Returns
    -------
    The first free slot common to all users, as produced by
    intersection() over the per-user free-time lists.

    Raises
    ------
    ValueError
        If the date string cannot be parsed.
    """
    today = datetime.today()
    date = date.split(" ")
    # Build the search window [08:00:00, 23:59:59] for the requested day.
    if len(date) == 1:
        # Day only: fill in the current year and month. today.month is an
        # int (e.g. 3), so it must be parsed with %m — the original used
        # %B, which expects a full month NAME ("March") and therefore made
        # strptime raise ValueError on every call through this branch.
        tstart = datetime.strptime(
            str(today.year) + '-' + str(today.month) + '-' + date[0] + ' 08:00:00',
            '%Y-%m-%d %H:%M:%S')
        tstop = datetime.strptime(
            str(today.year) + '-' + str(today.month) + '-' + date[0] + ' 23:59:59',
            '%Y-%m-%d %H:%M:%S')
    elif len(date) == 2:
        # Day + month name (e.g. "15 March"); year defaults to current.
        tstart = datetime.strptime(
            str(today.year) + '-' + date[1] + '-' + date[0] + ' 08:00:00',
            '%Y-%B-%d %H:%M:%S')
        tstop = datetime.strptime(
            str(today.year) + '-' + date[1] + '-' + date[0] + ' 23:59:59',
            '%Y-%B-%d %H:%M:%S')
    else:
        # Full "day month-name year".
        tstart = datetime.strptime(
            date[2] + '-' + date[1] + '-' + date[0] + ' 08:00:00',
            '%Y-%B-%d %H:%M:%S')
        tstop = datetime.strptime(
            date[2] + '-' + date[1] + '-' + date[0] + ' 23:59:59',
            '%Y-%B-%d %H:%M:%S')

    user_freeTime = []
    # Per-user appointment lists for the day, fetched by main().
    appointments = main(users, date)
    for user in appointments:
        free_time = []
        # Sentinel intervals at the window edges so the gap before the
        # first appointment and after the last one are considered too.
        tp = [(tstart, tstart)]
        for t in user:
            # Appointment bounds arrive as strings; convertDatetime turns
            # them into datetime objects.
            busy_start = convertDatetime(t['start']['dateTime'])
            busy_end = convertDatetime(t['end']['dateTime'])
            tp.append((busy_start, busy_end))
        tp.append((tstop, tstop))
        for i in range(1, len(tp)):
            # Keep only gaps longer than 30 minutes between consecutive
            # busy intervals. (The original computed tf_end as
            # tf_start + (tp[i][0] - tp[i-1][1]), which simplifies to
            # tp[i][0] exactly.)
            if (tp[i][0] - tp[i - 1][1]) > timedelta(minutes=30):
                tf_start = tp[i - 1][1]
                tf_end = tp[i][0]
                free_time.append(dict(
                    start=dict(
                        dateTime=tf_start.strftime("%Y-%m-%dT%H:%M:%S")),
                    end=dict(
                        dateTime=tf_end.strftime("%Y-%m-%dT%H:%M:%S"))))
        user_freeTime.append(free_time)
    # Call intersection() once and reuse the result (the original called
    # it twice: once for the debug print and once for the return value).
    common = intersection(user_freeTime)
    print(common)
    return common[0]
def pvplot(P_dir, U_dir, prob_dir):
    '''
    Draw the P-V (area vs. prediction-rate) plot and return the crossing
    point of the two fitted curves.

    P_dir: the path and name of positive data
    U_dir: the path and name of unlabeled data
    prob_dir: the path and name of the predict probability

    Returns (x_i, y_i): the intersection of the polynomial fits of the
    area curve and the prediction-rate curve.
    '''
    data_P = pd.read_csv(f'{P_dir}')
    data_U = pd.read_csv(f'{U_dir}')
    predict_data = pd.read_csv(f'{prob_dir}')
    # Positives are assumed to occupy the first data_P.shape[0] rows of the
    # unlabeled set — TODO confirm against the data-preparation step.
    true_labels = np.zeros(shape=(data_U.shape[0]))
    true_labels[:data_P.shape[0]] = 1.0
    # Second column holds the predicted probabilities.
    predict_data = predict_data.iloc[:, 1]
    low_p = min(predict_data)
    high_p = max(predict_data)
    predict_data = np.array(predict_data)
    # FIX: pv/pr/pr_p were appended to but never initialized in the original
    # (NameError unless module-level globals existed — and then values would
    # accumulate across repeated calls). Make them locals.
    pv = []
    pr = []
    pr_p = []
    # Sweep thresholds across the probability range.
    for i in np.arange(low_p, high_p, 0.01):
        p_label_v = np.int64(predict_data < i)
        p_v = (sum(p_label_v)) / true_labels.shape[0]
        pv.append(p_v)
        p_r_p = recall_score(true_labels, p_label_v)
        pr_p.append(p_r_p)
        p_label_r = np.int64(predict_data > i)
        p_r = recall_score(true_labels, p_label_r)
        pr.append(p_r)
    # fit the curve (degree-18 polynomial, as per the original protocol)
    x = np.arange(low_p, high_p, 0.01)
    fv = np.polyfit(x, pv, 18)
    fr = np.polyfit(x, pr, 18)
    fv = np.poly1d(fv)
    fr = np.poly1d(fr)
    fv = fv(x)
    fr = fr(x)
    # calculate the intersection of the two fitted curves
    x_i, y_i = intersection(x, fv, x, fr)
    # P-V plot
    fig, ax = plt.subplots(figsize=(8, 8))
    # Left axis: the percentage occupied known ore bodies volumes of the
    # corresponding prospecting probabilities.
    # (The original called ax.legend() here, before any labeled artist —
    # a no-op superseded by the ax.legend(loc=3) call below; removed.)
    ax.plot(x, pv, 'g', label='Area')
    ax.set_xlabel('Predictive probability', fontsize=20)
    ax.set_ylabel('Percentage of the known orebodies', fontsize=20)
    plt.tick_params(labelsize=20)
    ax.set_ylim([-0.05, 1.05])
    ax.set_xlim([-0.05, 1.05])
    ax.legend(loc=3, fontsize=14)
    # Right axis (inverted): the percentage occupied 3D geological model
    # volumes of the corresponding prospecting probabilities.
    ax1 = ax.twinx()
    ax1.plot(x, pr_p, 'r', label='Prediction rate')
    # 1 - y_i because the right axis runs from 1.05 down to -0.05.
    ax1.plot(x_i, 1 - y_i, 'bo', markersize=8, zorder=3)
    ax1.annotate(r'intersection',
                 xy=(x_i, 1 - y_i),
                 xytext=(50, 0),
                 textcoords='offset points',
                 fontsize=16,
                 arrowprops=dict(arrowstyle='->', connectionstyle='arc3'))
    ax1.set_ylabel('Percentage of the study area', fontsize=20)
    plt.tick_params(labelsize=20)
    ax1.set_ylim([1.05, -0.05])
    ax1.legend(loc=4, fontsize=14)
    plt.show()
    # x_i, y_i is the intersection
    return x_i, y_i