import traceback


def main():
    try:
        Logger.open()
        Logger.sendTelegramMessage("Bot started...")
        exchange = ExchangeWrapperForBacktesting()
        analyst = Analyst(exchange)
        marketRepos = dict()
        # TODO: Markets can appear and disappear with time...
        for marketName in [x["MarketName"] for x in exchange.getMarketSummary()
                           if x["BaseVolume"] > 3000]:
            marketRepos[marketName] = MarketStatusRepository(marketName)
        Logger.log("Markets: " + " ".join(marketRepos.keys()))
        while True:
            # TODO: the iterations of this loop don't depend on each other,
            # so we can parallelize
            for market in marketRepos.keys():
                marketRepos[market].addTick(exchange.getCurrentCandle(market))
            analyst.doTrading(marketRepos.values())
            exchange.wait()
        Logger.close()
    except:
        exceptionInfo = traceback.format_exc()
        Logger.log(exceptionInfo)
        Logger.close()
def test_buy_sell_bad(self):
    a = Analyst(100)
    buy = a.buyLong(50, 3)
    self.assertFalse(buy)
    self.assertAnalyst(a, 100, 0, 0)
    sell = a.sellLong(55, 3)
    self.assertFalse(sell)
    self.assertAnalyst(a, 100, 0, 0)
def test_buy_long_good(self):
    a = Analyst(10000)
    a.buyLong(50, 3)
    cash = 10000 - 3 * 50
    self.assertAnalyst(a, cash, 3, 0)
    a.buyLong(55, 10)
    cash -= 55 * 10
    self.assertAnalyst(a, cash, 13, 0)
def study_strategies(presets):
    analyst = Analyst()
    for preset in presets.index:
        intern = Intern(strategy, preset)
        prices, signals, labels = intern.get_all_data()
        intern.create_samples(prices, signals, labels)
    scientist_deligate(8)
    analyst.get_parcel()
    return analyst.get_parcel()
def test_rebalance(self):
    a = Analyst(1000)
    a.rebalancePortfolio(0.2, 0.4, 10, 20)
    portfolio = a.getPortfolio(10, 20)
    self.assertPortfolio(portfolio, .4, .2, .4)
    a.rebalancePortfolio(.4, .2, 10, 20)
    portfolio = a.getPortfolio(10, 20)
    self.assertPortfolio(portfolio, .4, .4, .2)
def test_average_cost(self):
    a = Analyst(1000)
    a.buyLong(10.2, 2)
    a.buyLong(20.66, 5)
    a.buyLong(30.66, 17)
    avg = (30.66 * 17 + 20.66 * 5 + 10.2 * 2) / (2 + 5 + 17)
    self.assertAlmostEqual(a.longs.avgCost, avg)
    a.sellLong(30, 15)
    a.buyLong(40.1, 8)
    self.assertAlmostEqual(a.longs.avgCost, 33.09676470588235)
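If it helps to see where 33.09676470588235 comes from, the following is a minimal, hypothetical sketch of value-weighted average-cost bookkeeping that reproduces the numbers asserted above; the Position class and its attribute names are illustrative assumptions, not the project's actual Analyst internals.

class Position:
    """Hypothetical long-position bookkeeping; illustrates the averaging only."""

    def __init__(self):
        self.amount = 0
        self.avgCost = 0.0

    def buy(self, price, amount):
        # The new average cost is the value-weighted mean of the old holdings
        # and the newly bought lot.
        total_cost = self.avgCost * self.amount + price * amount
        self.amount += amount
        self.avgCost = total_cost / self.amount

    def sell(self, price, amount):
        # Selling reduces the quantity held but leaves the average cost unchanged.
        self.amount -= amount


p = Position()
p.buy(10.2, 2)
p.buy(20.66, 5)
p.buy(30.66, 17)   # avgCost == 644.92 / 24 ~= 26.8717
p.sell(30, 15)     # 9 units remain at the same average cost
p.buy(40.1, 8)     # (9 * 26.8717 + 8 * 40.1) / 17 ~= 33.0968
print(p.avgCost)   # ~33.09676470588235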
def __init__(self):
    params = Config.params

    # Sets up the logger
    self.logger = logging.getLogger(__name__)

    # Sets up the clock
    self.clock = Clock()

    # Sets up the airport
    airport_name = params["airport"]
    self.airport = Airport.create(airport_name)

    # Sets up the scenario
    self.scenario = Scenario.create(airport_name, self.airport.surface)

    # Sets up the routing expert monitoring the airport surface
    self.routing_expert = RoutingExpert(self.airport.surface.links,
                                        self.airport.surface.nodes,
                                        params["simulation"]["cache"])

    # Sets up the uncertainty module
    self.uncertainty = (Uncertainty(params["uncertainty"]["prob_hold"],
                                    params["uncertainty"]["speed_bias_sigma"],
                                    params["uncertainty"]["speed_bias_mu"])
                        if params["uncertainty"]["enabled"] else None)

    # Loads the requested scheduler
    self.scheduler = get_scheduler()

    if not params["simulator"]["test_mode"]:
        # Sets up the analyst
        self.analyst = Analyst(self)

        # Sets up the state logger
        self.state_logger = StateLogger()

    # Initializes the previous schedule time to None
    self.last_schedule_time = None

    # Initializes the last execution time for rescheduling to None
    self.last_schedule_exec_time = None

    self.__print_stats()
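The constructor only requires that Config.params expose the keys it reads. The dictionary below is an illustrative sketch of that shape, inferred purely from the lookups above; every value is a placeholder, not a real default from the project.

# Illustrative shape of Config.params, inferred from the keys read above.
# All values are placeholders.
params = {
    "airport": "example-airport",
    "simulation": {"cache": True},
    "uncertainty": {
        "enabled": False,        # when False, self.uncertainty is set to None
        "prob_hold": 0.1,
        "speed_bias_sigma": 0.5,
        "speed_bias_mu": 0.0,
    },
    "simulator": {"test_mode": False},  # False: Analyst and StateLogger are created
}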
        else:
            p.set_title('Fixed Cluster-like 2D Test Set')
        p.grid(True)
        p.set_aspect('equal', 'datalim')
        for i, tup in enumerate(self.data):
            p.annotate(str(i), tup)
        plt.show()


# Brief script-like behavior for development, debugging, and testing purposes:
if __name__ == "__main__":
    from analyst import Analyst
    from analyst.evaluators import NucleusClusterizer

    s = TestSet2D(random=False)
    a = Analyst(
        embeddings=s,
        metric="euclidean",
        encoder=s.encode,
        decoder=s.decode,
        desc="2D Debug Test",
        evaluators=[
            # "Nodal 3-Hubs",
            NucleusClusterizer(hub_category="Nodal 3-Hubs"),
            "Hubs",
            "Nuclei",
            "Nodes",
        ],
        make_distance_matrix=True)
    Analyst.save(a, "an_2d_debug_test")
import numpy as np

from analyst import Analyst, Command, DataSource, DataWorker, Pointer
from frameworks import framework

frameworks = framework()
pandas = frameworks.pandas

identity = Analyst("Alice", port=65442, host="127.0.0.1")
worker = DataWorker(port=65441, host="127.0.0.1")
dataset = DataSource(identity, worker, "Sample Data").init_pointer()

dataset["Number of times pregnant"].mean()
dataset["Number of times pregnant"].mean()

# dataset.fillna(0)
# print(dataset[dataset.isnull()].sum().get())
"""
print(dataset.columns)
print(dataset["Number of times pregnant"].sum().get())
print(
    dataset["Plasma glucose concentration a 2 hours in an oral glucose tolerance test"]
    .sum()
    .get()
)
dataset = dataset[
    [
        "Diastolic blood pressure (mm Hg)",
        "2-Hour serum insulin (mu U/ml)",
def test_analyst(self):
    file_path = './test/sample.jpg'
    analyst = Analyst()
    analyst(file_path)
        # p.set_ylabel('y')
        if self.random:
            p.set_title('Random 2D Test Set')
        else:
            p.set_title('Fixed Cluster-like 2D Test Set')
        p.grid(True)
        p.set_aspect('equal', 'datalim')
        for i, tup in enumerate(self.data):
            p.annotate(str(i), tup)
        plt.show()


# Brief script-like behavior for development, debugging, and testing purposes:
if __name__ == "__main__":
    from analyst import Analyst
    from analyst.evaluators import NucleusClusterizer

    s = TestSet2D(random=False)
    a = Analyst(embeddings=s,
                metric="euclidean",
                encoder=s.encode,
                decoder=s.decode,
                desc="2D Debug Test",
                evaluators=["Nodal 3-Hubs", "Nuclei", "Nodes", "Supernodes",
                            "Extremities", "7-KMeans", "Spatial"],
                make_distance_matrix=True,
                over_write=True)
    Analyst.save(a, "an_2d_debug_test")
        if self.random:
            p.set_title('Random 2D Test Set')
        else:
            p.set_title('Fixed Cluster-like 2D Test Set')
        p.grid(True)
        p.set_aspect('equal', 'datalim')
        for i, tup in enumerate(self.data):
            p.annotate(str(i), tup)
        plt.show()


# Brief script-like behavior for development, debugging, and testing purposes:
if __name__ == "__main__":
    from analyst import Analyst

    t = TestSet2D()
    r = TestSet2D(random=True, seed=19680801)
    at = Analyst(t.data, "euclidean", t.encode, t.decode,
                 desc="Contrived 2D Test Set")
    ar = Analyst(r.data, "euclidean", r.encode, r.decode,
                 desc="Random 2D Test Set")
    at.compare_difference(ar)
    Analyst.save(at, "an_2d_contrived")
    Analyst.save(ar, "an_2d_random")
root_dir = "images/{0:%Y}/{0:%m}/{0:%d}".format(now) os.makedirs(root_dir, exist_ok=True) return root_dir def get_save_dir(ip, token): root_dir = get_root_dir() save_dir = os.path.join(root_dir, token) os.makedirs(save_dir, exist_ok=True) return save_dir analyst = Analyst() @post('/analyst/upload') def upload(): image = request.files.get('image') ip = request.environ.get('REMOTE_ADDR') token = get_token(ip) save_dir = get_save_dir(ip, token) image = Image.open(image.file) image_path = '{}/orig.jpg'.format(save_dir) image.save(image_path) ids = analyst(image_path)
url + "Norge/S%C3%B8r-Tr%C3%B8ndelag/Orkdal/S%C3%B8vatnet/varsel.xml"), "Åre": WeatherMonitor("Åre", url + "Sverige/J%C3%A4mtland/%C3%85re/varsel.xml"), "Stranda": WeatherMonitor("Stranda", url + "Norge/Møre_og_Romsdal/Stranda/Stranda/varsel.xml"), "Overøye": WeatherMonitor("Overøye", url + "Norge/Møre_og_Romsdal/Stranda/Stranda/varsel.xml"), "Vassfjellet": WeatherMonitor( "Vassfjellet", url + "Norge/Sør-Trøndelag/Klæbu/Vassfjellet_skisenter/varsel.xml"), } analyst = Analyst(temperature_upper_limit=1.0, precipitation_lower_limit=5.0) limit = 3 while True: try: print "Fetching data, making rec. and sending data... (" + strftime( "%Y-%m-%d %H:%M", gmtime()) + ")" start_time = time.time() recommendations = [] for elem in monitors.keys(): data = monitors[elem].get_daily_data(limit) local_recommendations = analyst.get_recommendation(data, elem) recommendations.extend(local_recommendations)
def test_init(self):
    a = Analyst(10000)
    self.assertAnalyst(a, 10000, 0, 0)
def main():
    app = QApplication(sys.argv)
    chat = Analyst()
    sys.exit(app.exec_())
def test_buy_sell_long(self):
    a = Analyst(150)
    a.buyLong(50, 3)
    a.sellLong(55, 3)
    cash = 150 - (3 * 50) + (3 * 55)
    self.assertAnalyst(a, cash, 0, 0)
def test_get_portfolio(self):
    a = Analyst(1000)
    a.buyLong(10.2, 2)
    a.buyShort(22, 3)
    portfolio = a.getPortfolio(10.2, 22)
    self.assertPortfolio(portfolio, 0.9136, 10.2 * 2 / 1000, 22 * 3 / 1000)
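As a quick sanity check of the expected cash fraction 0.9136, assuming both the long and the short purchase debit cash at price times quantity against the initial 1000:

# Hypothetical arithmetic check for the fractions asserted above
# (assumes both trades reduce cash by price * quantity).
cash = 1000 - 10.2 * 2 - 22 * 3   # 913.6 left in cash
print(cash / 1000)                # 0.9136 -> expected cash fraction
print(10.2 * 2 / 1000)            # 0.0204 -> long exposure fraction
print(22 * 3 / 1000)              # 0.066  -> short exposure fraction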