def process_message(msg):
    global max_price
    global reached_goal
    global percentage_change
    global price_bought
    global cur_price
    cur_price = float(msg['p'])
    percent_from_max = utils.percent_change(max_price, cur_price)
    percent_from_bought = utils.percent_change(price_bought, cur_price)
    # COMMENT THIS LINE OUT IF YOU DON'T WANT TOO MUCH DATA
    print_trade_data(price_bought, cur_price, max_price, percent_from_max,
                     percent_from_bought)
    if not reached_goal and percent_from_bought >= sell_order_desired_percentage_profit:
        reached_goal = True
        utils.print_and_write_to_logfile("REACHED PRICE GOAL")
    if percent_from_max < sell_percent_down_to_sell and reached_goal:
        utils.print_and_write_to_logfile("PERCENT DOWN FROM PEAK: " +
                                         str(percent_from_max) + ". TIME TO SELL")
        try:
            reactor.stop()
        except Exception:
            print("REACTOR ALREADY STOPPED")
    max_price = max(cur_price, max_price)
def handle_selling(bought_price, market, amount_bought):
    global max_price
    global reached_goal
    global percentage_change
    global price_bought
    global cur_price
    percentage_change = 0
    reached_goal = False
    max_price = bought_price
    price_bought = bought_price
    wait_until_time_to_sell(market)
    status, order_id = binance_utils.limit_sell_on_binance(
        binance, market, amount_bought, cur_price, sell_order_underprice_percent)
    amount_sold = 0
    utils.print_and_write_to_logfile("WAITING FOR SELL ORDER TO GO THROUGH")
    while status != 'FILLED':
        cur_price = binance_utils.get_most_recent_buy_order_price(binance, market)
        order = binance.get_order(symbol=market, orderId=order_id)
        status = order['status']
        amount_sold = float(order['executedQty'])  # track partial fills
        percentage_change = utils.percent_change(bought_price, cur_price)
        time.sleep(seconds_before_checking_binance)
    utils.print_and_write_to_logfile(market + " SOLD")
def sell_after_pecentage_gain(bought_price, market, amount):
    sold = False
    while not sold:
        cur_price = binance_utils.get_cur_price_from_large_enough_buy_order(
            binance, market, amount)
        if utils.percent_change(bought_price, cur_price) > desired_gain_percent:
            sold = binance_utils.market_sell_on_binance(binance, market)
        if not sold:
            time.sleep(seconds_before_checking_binance_price)
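# The trading helpers above all call utils.percent_change(old, new). The utils
# module itself is not part of this listing, so the following is only a
# minimal sketch of the two-argument form the call sites imply (a signed
# percent move from old to new), not the project's actual implementation.
def percent_change_sketch(old_value, new_value):
    """Signed percent change from old_value to new_value."""
    return (new_value - old_value) / old_value * 100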
def excess_demand(self, signal, time):
    """Excess demand function for moving averages based heuristic (group 1)."""
    m_window_average = moving_average(signal, time, self.m)
    n_window_average = moving_average(signal, time, self.n)
    price_change = percent_change(m_window_average, n_window_average)
    price_change_strengths = self.ppc_signals.grade(price_change)
    investor_signal_strengths = self.investor_signals.centers
    result = np.dot(investor_signal_strengths, price_change_strengths)
    result /= np.sum(price_change_strengths)
    return result
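# Reading of the last three lines of excess_demand: with signal centers c_i
# and activation strengths s_i = grade(price_change), the returned value is
# the weighted average sum_i(c_i * s_i) / sum_i(s_i), i.e. a center-of-gravity
# style aggregation of the investor signals. This gloss is inferred from the
# code itself; the surrounding class is not shown here.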
def compute_changes(self):
    LOG.debug("Calculating changes")
    fields = (
        "infected",
        "tests",
        "vaccinated.doses",
    )
    for day in self.data:
        for f in fields:
            d = day[f"{f}.today"]
            t = day[f"{f}.total"]
            day[f"{f}.delta_percent"] = percent_change(t, t - d)
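# A hedged illustration of the per-day record compute_changes() assumes (field
# names come from `fields` above; the numbers are invented). With
# infected.total == 110 and infected.today == 10, t - d == 100 is the previous
# day's running total, so the stored value is percent_change(110, 100): the
# percent step measured from today's total back to yesterday's.
#
#   day = {"infected.today": 10, "infected.total": 110}
#   # => day["infected.delta_percent"] == percent_change(110, 100)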
def getMoodIndexAndChange(request):
    tweets = []
    #nicknamesDict = utils.generateUniNameDict("./uniName.txt")
    collegeName = request.GET.get("text")
    if collegeName in nicknamesDict:
        searchKey = utils.advancedSearch([], any=nicknamesDict[collegeName])
    else:
        searchKey = collegeName
    mostPositiveTweetID = ""
    maxPos = -1
    mostNegativeTweetID = ""
    maxNeg = 1
    for tweet in tweepy.Cursor(api.search, q=searchKey + " -filter:retweets",
                               rpp=5, lang="en", tweet_mode="extended").items(100):
        instance = {}
        text = TextBlob(tweet.full_text)
        instance["timestamp"] = tweet.created_at
        instance["score"] = text.sentiment.polarity
        if text.sentiment.polarity > maxPos:
            mostPositiveTweetID = tweet.id_str
            maxPos = text.sentiment.polarity
        if text.sentiment.polarity < maxNeg:
            mostNegativeTweetID = tweet.id_str
            maxNeg = text.sentiment.polarity
        tweets.append(instance)
    # sort the tweets chronologically
    utils.sort_tweet(tweets)
    # split the 100 tweets into twenty buckets of five
    bucketAverages = []
    for i in range(0, 100, 5):
        bucketSum = 0
        for j in range(i, i + 5):
            bucketSum += tweets[j]["score"]
        bucketAverages.append(bucketSum / 5)
    # old score: average of the first ten buckets; new score: the last ten
    oldScore = sum(bucketAverages[0:10]) / 10
    newScore = sum(bucketAverages[10:20]) / 10
    change = utils.percent_change(oldScore, newScore)
    return JsonResponse({
        "score": newScore,
        "change": change,
        "historical data": bucketAverages,
        "positive tweet id": mostPositiveTweetID,
        "negative tweet id": mostNegativeTweetID
    })
def getOnlineIndexAndChange(request):
    tweets = []
    anyWord = ["zoom", "online", "remote"]
    #nicknamesDict = utils.generateUniNameDict("./uniName.txt")
    collegeName = request.GET.get("text")
    if collegeName in nicknamesDict:
        anyWord.extend(nicknamesDict[collegeName])
        searchKey = utils.advancedSearch([], any=anyWord)
    else:
        searchKey = utils.advancedSearch([collegeName], any=anyWord)
    mostPositiveTweetID = ""
    maxPos = -1
    mostNegativeTweetID = ""
    maxNeg = 1
    for tweet in tweepy.Cursor(api.search, q=searchKey + " -filter:retweets",
                               rpp=5, lang="en", tweet_mode="extended").items(50):
        instance = {}
        text = TextBlob(tweet.full_text)
        instance["timestamp"] = tweet.created_at
        instance["score"] = text.sentiment.polarity
        tweets.append(instance)
        if text.sentiment.polarity > maxPos:
            mostPositiveTweetID = tweet.id_str
            maxPos = text.sentiment.polarity
        if text.sentiment.polarity < maxNeg:
            mostNegativeTweetID = tweet.id_str
            maxNeg = text.sentiment.polarity
    # sort chronologically, then compare the older half against the newer half
    utils.sort_tweet(tweets)
    oldScore = sum(instance["score"] for instance in tweets[0:25]) / 25
    newTweets = tweets[25:]
    if not newTweets:
        newScore = 0
    else:
        newScore = sum(instance["score"] for instance in newTweets) / len(newTweets)
    change = utils.percent_change(oldScore, newScore)
    return JsonResponse({
        "score": newScore,
        "change": change,
        "positive tweet id": mostPositiveTweetID,
        "negative tweet id": mostNegativeTweetID
    })
def getHealthIndexAndChange(request):
    tweets = []
    anyWord = ["covid", "health", "social distancing", "virus", "safety"]
    #nicknamesDict = utils.generateUniNameDict("./uniName.txt")
    collegeName = request.GET.get("text")
    if collegeName in nicknamesDict:
        anyWord.extend(nicknamesDict[collegeName])
        searchKey = utils.advancedSearch([], any=anyWord)
    else:
        searchKey = utils.advancedSearch([collegeName], any=anyWord)
    mostPositiveTweetID = ""
    maxPos = -1
    mostNegativeTweetID = ""
    maxNeg = 1
    for tweet in tweepy.Cursor(api.search, q=searchKey + " -filter:retweets",
                               rpp=5, lang="en", tweet_mode="extended").items(80):
        instance = {}
        text = TextBlob(tweet.full_text)
        instance["timestamp"] = tweet.created_at
        instance["score"] = text.sentiment.polarity
        tweets.append(instance)
        if text.sentiment.polarity > maxPos:
            mostPositiveTweetID = tweet.id_str
            maxPos = text.sentiment.polarity
        if text.sentiment.polarity < maxNeg:
            mostNegativeTweetID = tweet.id_str
            maxNeg = text.sentiment.polarity
    # sort the tweets and calculate new score and old score
    utils.sort_tweet(tweets)
    # split the 80 tweets into twenty buckets of four
    bucketAverages = []
    for i in range(0, len(tweets), 4):
        bucketSum = 0
        for j in range(i, min(i + 4, len(tweets))):
            bucketSum += tweets[j]["score"]
        bucketAverages.append(bucketSum / 4)
    oldScore = sum(bucketAverages[0:10]) / 10
    newScore = sum(bucketAverages[10:20]) / 10
    change = utils.percent_change(oldScore, newScore)
    return JsonResponse({
        "score": newScore,
        "change": change,
        "historical data": bucketAverages,
        "positive tweet id": mostPositiveTweetID,
        "negative tweet id": mostNegativeTweetID
    })
def get_time_series_inplane(coords, scan_file, f_c=0.01, up_sample_factor=[1, 1, 1],
                            detrend=True, normalize=True, average=True, TR=None):
    """vista_get_time_series: Acquire a time series for a particular scan/ROI.

    Parameters
    ----------
    coords: a list of arrays
        each array holds the X,Y,Z locations of an ROI (as represented in
        the Inplane)

    scan_file: string, full path to the analyze file of the scan

    TR: float
        the repetition time in the experiment

    up_sample_factor: float
        the ratio between the size of the inplane and the size of the gray
        (taking into account FOV and number of voxels in each dimension).
        Defaults to [1,1,1] - no difference

    detrend: bool, optional
        whether to detrend the signal. Defaults to 'True'

    normalize: bool, optional
        whether to transform the signal into % signal change. Defaults to 'True'

    average: bool, optional
        whether to average the resulting signal

    Returns
    -------
    time_series: array, the resulting time_series
        Depending on the averaging flag, can have the dimensions
        1*time-points or number-voxels*time-points.

    Notes
    -----
    The order of the operations on the time-series is:
    detrend (on a voxel-by-voxel basis) => normalize (on a voxel-by-voxel
    basis) => average (across voxels, on a time-point-by-time-point basis)
    """
    from nipy.io.imageformats import load

    # Get the nifti image object
    print('Reading data from %s' % scan_file)
    data = load(scan_file).get_data()  # if using nipy.io.imageformats.load

    # Adjust the coordinates according to the ratio between the sampling in
    # the gray and the sampling in the inplane, move the slice dimension to
    # be the first one and change the indexing from 1-based to 0-based. The
    # coord order is as it is in the input, so need to make sure that it is
    # correct on the input side.
    this_data = data[np.round(coords[0] / up_sample_factor[0]).astype(int) - 1,
                     np.round(coords[1] / up_sample_factor[1]).astype(int) - 1,
                     np.round(coords[2] / up_sample_factor[2]).astype(int) - 1]

    if normalize:
        this_data = tsu.percent_change(this_data)

    if average:
        this_data = np.mean(this_data, 0)

    time_series = ts.UniformTimeSeries(data=this_data, sampling_interval=TR)

    if detrend:
        F = ta.FilterAnalyzer(time_series, lb=f_c)
        time_series = F.filtered_boxcar

    return time_series
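# A hypothetical call, assuming an Analyze/NIfTI scan on disk and 1-based ROI
# coordinates laid out as three rows (X, Y, Z), which is how the indexing
# above consumes `coords`. The path and coordinate values are invented for
# illustration only:
#
#   coords = [np.array([10, 11]), np.array([20, 20]), np.array([5, 5])]
#   series = get_time_series_inplane(coords, '/data/scan1.img', TR=2.0)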
common_ids = loaded['common_ids']
profile_data = loaded['profile_data']

# Id to root mean square dict
id_to_rmse = {}
# Id to slope dict
id_to_slope = {}
# Id to max track difference before greatest percent gain in followers
id_to_track = {}

for id in common_ids:
    # Get percent change set for followers
    # TODO: Should percent change be from minimum follower count???
    points = utils.percent_change(profile_data[id]['followers_count'])
    x = np.array([1, 2, 3, 4, 5])
    # Find regression line for percent change set
    regline = linregress(x, points)
    if regline.slope >= 0:
        # Calculate rmse of the regression line against the observed points
        rmse = 0
        for i in range(len(points)):
            if regline.slope > 0:
                predicted = (regline.slope * x[i]) + regline.intercept
                rmse += (predicted - points[i]) ** 2
        rmse = rmse / len(points)
        rmse = math.sqrt(rmse)
for f in vars.db_filenames:
    db = Database_Instance('../db_files/' + f)
    for a in db.query('''SELECT id, followers_count FROM surface_artist_data'''):
        if a[0] in id_to_followers:
            id_to_followers[a[0]].append(a[1])
        else:
            id_to_followers[a[0]] = [a[1]]

max_followers = []
max_percent_gain = []
max_followers_bad = []
max_percent_gain_bad = []
ids = []
ids_bad = []

for id in id_to_followers.keys():
    try:
        # Compute both maxima before appending so a failure on either leaves
        # the two plotted lists the same length.
        followers = max(id_to_followers[id])
        gain = max(utils.percent_change(id_to_followers[id]))
        max_followers.append(followers)
        max_percent_gain.append(gain)
    except (ValueError, ZeroDivisionError):
        print(id_to_followers[id])

plt.plot(max_followers, max_percent_gain, 'b.')
plt.ylabel('Max Percent Gain')
plt.xlabel('Max Followers')
plt.show()
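# Unlike the two-argument form used by the trading code, the follower-count
# scripts above call utils.percent_change on a whole series and iterate over
# the result. The real utils module is not shown; this is only a sketch of
# the sequence form those call sites imply:
def percent_change_series_sketch(values):
    """Percent change between each pair of consecutive values."""
    return [(new - old) / old * 100 for old, new in zip(values, values[1:])]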