def test_call(fgakc):
    f, g, a, k, cache = fgakc
    cf, cg = cache(f), cache(g)
    # Call each cached function twice with the same arguments.
    r1, r3 = cf(*a, **k), cg(*a, **k)
    r2, r4 = cf(*a, **k), cg(*a, **k)
    # The underlying functions must each have been invoked only once.
    assert f.call_count == 1
    assert g.call_count == 1
    assert f.call_args == (a, k)
    assert g.call_args == (a, k)
    # Repeated calls return the cached results; the two caches are independent.
    assert r1 == r2
    assert r3 == r4
    assert r2 != r3
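# A minimal sketch of the memoizing wrapper the test above exercises; the real
# fixture's `cache` may differ. Each wrapped function keeps its own result
# dict, so cf and cg cache independently and each underlying callable runs
# only once per distinct (args, kwargs) pair. The name `simple_cache` is
# illustrative, not from the original source.
def simple_cache(fn):
    results = {}
    def wrapper(*args, **kwargs):
        key = (args, tuple(sorted(kwargs.items())))
        if key not in results:
            results[key] = fn(*args, **kwargs)  # computed on the first call only
        return results[key]
    return wrapper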
def transfer_values_cache(cache_path, model, images=None, image_paths=None):
    """
    This function loads the transfer-values if they have already been
    calculated; otherwise it calculates the values and saves them to a
    file that can be re-loaded again later.

    Because the transfer-values can be expensive to compute, it can be
    useful to cache the values through this function instead of calling
    transfer_values() directly on the Inception model.

    See Tutorial #08 for an example on how to use this function.

    :param cache_path:
        File containing the cached transfer-values for the images.

    :param model:
        Instance of the Inception model.

    :param images:
        4-dim array with images. [image_number, height, width, colour_channel]

    :param image_paths:
        Array of file-paths for images (must be jpeg-format).

    :return:
        The transfer-values from the Inception model for those images.
    """

    # Helper-function for processing the images if the cache-file does not exist.
    # This is needed because we cannot supply both fn=process_images
    # and fn=model.transfer_values to the cache()-function.
    def fn():
        return process_images(fn=model.transfer_values,
                              images=images,
                              image_paths=image_paths)

    # Read the transfer-values from a cache-file,
    # or calculate them if the file does not exist.
    transfer_values = cache(cache_path=cache_path, fn=fn)

    return transfer_values
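# The cache() helper called above (and in load_cached() and
# process_images_val() below) is assumed to behave roughly like this
# pickle-based sketch. The body is inferred from the call sites, not taken
# from the original source: if the file exists, unpickle and return its
# contents; otherwise call fn, pickle the result, and return it.
import os
import pickle

def cache(cache_path, fn, *args, **kwargs):
    if os.path.exists(cache_path):
        # Reload the previously computed result from disk.
        with open(cache_path, mode='rb') as file:
            obj = pickle.load(file)
        print("- Data loaded from cache-file: " + cache_path)
    else:
        # Compute the result and save it for next time.
        obj = fn(*args, **kwargs)
        with open(cache_path, mode='wb') as file:
            pickle.dump(obj, file)
        print("- Data saved to cache-file: " + cache_path)
    return obj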
def __init__(self, prob, param):
    # TODO pass Kernel prob.l, prob.x, param
    self.cache = cache(prob.l, param.cache_size)
    # TODO (long int)(param.cache_size*(1<<20))
    self.QD = numpy.zeros(prob.l)
    for i in range(prob.l):
        self.QD[i] = self.kernel_function(i, i)
def load_cached(cache_path, in_dir):
    """
    Wrapper-function for creating a DataSet-object, which will be
    loaded from a cache-file if it already exists, otherwise a new
    object will be created and saved to the cache-file.

    This is useful if you need to ensure the ordering of the
    filenames is consistent every time you load the data-set,
    for example if you use the DataSet-object in combination
    with Transfer Values saved to another cache-file, see e.g.
    Tutorial #09 for an example of this.

    :param cache_path:
        File-path for the cache-file.

    :param in_dir:
        Root-dir for the files in the data-set.
        This is an argument for the DataSet-init function.

    :return:
        The DataSet-object.
    """

    print("Creating dataset from the files in: " + in_dir)

    # If the object-instance for DataSet(in_dir=data_dir) already
    # exists in the cache-file then reload it, otherwise create
    # an object instance and save it to the cache-file for next time.
    dataset = cache(cache_path=cache_path, fn=DataSet, in_dir=in_dir)

    return dataset
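# Hypothetical usage of load_cached(); both paths are illustrative, not from
# the original source. The first call builds and pickles the DataSet; later
# calls reload it with the same file ordering.
#
#     dataset = load_cached(cache_path='data/dataset.pkl',
#                           in_dir='data/images/')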
def test_callable():
    g = np.random.random
    f = lambda g: g()
    cache = C()  # C is the cache class under test
    cf = cache(f)
    _ = cf(g)
    _ = cf(g)
    # Both calls passed the same callable, so only one cache entry exists.
    keys = cache.d.keys()
    assert len(keys) == 1
def __init__(self):
    self.cache = cache()
    self.cache_input = True
    self.cache_results = True
    self.cache_chart = True
    self.logs = logs()

    # configurable variables
    self.input_file_name = ""
    self.score_only = False  # set to true to only calculate what is required
                             # for scoring a strategy, to speed up performance
    self.shares = 0.1  # order size
    self.wll = 180  # window length long
    self.wls = 2  # window length short
    self.buy_wait = 0  # min sample periods between buy orders
    self.buy_wait_after_stop_loss = 6  # min sample periods between buy orders
                                       # after a stop loss order
    self.markup = 0.01  # order mark up
    self.stop_loss = 0.282  # stop loss
    self.enable_flash_crash_protection = True  # convert a stop loss order into
                                               # a short term hold position
    self.flash_crash_protection_delay = 180  # max_hold in minutes
    self.stop_age = 10000  # stop age - dump after n periods
    self.atr_depth = 60 * 1  # period depth of the average true range,
                             # used to split input data into quartiles
    self.macd_buy_trip = -0.66  # macd buy indicator
    self.rsi_enable = 0  # enable/disable the relative strength indicator
    self.rsi_length = 1  # RSI length
    self.rsi_period_length = 10  # RSI period length
    self.rsi_gate = 50  # RSI gate (RSI must be below gate to enable buy orders)
    self.min_i_pos = 0  # min periods of increasing price before buy order placed
    self.min_i_neg = 0  # min periods of declining price before sell order placed
    self.stbf = 2.02  # short trade biasing factor
                      # -- increase to favor day trading
                      # -- decrease to 2 to eliminate bias
    self.nlsf = 5.0  # non-linear scoring factor - favor the latest trades
                     # max factor = exp(self.nlsf) @ the last sample period
    self.commision = 0.006  # mt.gox commission
    self.quartile = 1  # define which market detection quartile to trade on (1-4)

    self.input_data = []
    self.input_data_length = 0
    self.market_class = []
    self.current_quartile = 0
    self.classified_market_data = False
    self.max_length = 1000000
    self.reset()
    return
def process_images_val(filenames_val):
    print("Processing {0} images in validation-set ...".format(len(filenames_val)))

    # Path for the cache-file.
    cache_path = os.path.join(coco.data_dir, "transfer_values_val.pkl")

    # If the cache-file already exists then reload it,
    # otherwise process all images and save their transfer-values
    # to the cache-file so it can be reloaded quickly.
    transfer_values = cache(cache_path=cache_path,
                            fn=process_images,
                            data_dir=coco.val_dir,
                            filenames=filenames_val)

    return transfer_values
def __init__(self, prob, param):
    # TODO pass Kernel prob.l, prob.x, param
    l = self.l = prob.l
    self.cache = cache(l, param.cache_size)
    # TODO (long int)(param.cache_size*(1<<20))
    self.QD = numpy.zeros(2 * l, dtype=float)
    self.sign = numpy.zeros(2 * l, dtype=int)
    self.index = numpy.zeros(2 * l, dtype=int)
    for k in range(l):
        self.sign[k] = 1
        self.sign[k + l] = -1
        self.index[k] = 1
        self.index[k + l] = -1
        self.QD[k] = self.kernel_function(k, k)
        self.QD[k + l] = self.QD[k]
    self.buffer = numpy.zeros([2, 2 * l], dtype=float)
    self.next_buffer = 0
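# The cache(l, cache_size) object used by these Q-matrix classes is assumed
# to behave like libsvm's kernel-row cache: kernel rows are expensive to
# recompute, so computed rows are kept and reused. A minimal dict-based
# sketch (class and method names are illustrative; no LRU eviction, so
# cache_size is accepted but not enforced here):
class KernelRowCache:
    def __init__(self, l, cache_size):
        self.l = l                    # number of training examples
        self.cache_size = cache_size  # nominal capacity, unused in this sketch
        self.rows = {}                # row index -> cached kernel row

    def get_data(self, i, compute_row):
        # Return kernel row i, computing and storing it on the first request.
        if i not in self.rows:
            self.rows[i] = compute_row(i)
        return self.rows[i]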
def __init__(self):
    self.cache = cache()

    # configurable variables
    self.input_file_name = "./datafeed/bcfeed_mtgoxUSD_1min.csv"  # default input file
    self.score_only = False  # set to true to only calculate what is required
                             # for scoring a strategy, to speed up performance
    self.shares = 0.1  # order size
    self.wll = 180  # window length long
    self.wls = 2  # window length short
    self.buy_wait = 0  # min sample periods between buy orders
    self.buy_wait_after_stop_loss = 6  # min sample periods between buy orders
                                       # after a stop loss order
    self.markup = 0.01  # order mark up
    self.stop_loss = 0.282  # stop loss
    self.enable_flash_crash_protection = True  # convert a stop loss order into
                                               # a short term hold position
    self.flash_crash_protection_delay = 180  # max_hold in minutes
    self.stop_age = 10000  # stop age - dump after n periods
    self.macd_buy_trip = -0.66  # macd buy indicator
    self.min_i_pos = 0  # min periods of increasing price before buy order placed
    self.min_i_neg = 0  # min periods of declining price before sell order placed
    self.stbf = 2.02  # short trade biasing factor
                      # -- increase to favor day trading
                      # -- decrease to 2 to eliminate bias
    self.nlsf = 5.0  # non-linear scoring factor - favor the latest trades
                     # max factor = exp(self.nlsf) @ the last sample period
    self.commision = 0.006  # mt.gox commission
    self.quartile = 1  # define which market detection quartile to trade on (1-4)

    self.input_data = []
    self.input_data_length = 0
    self.market_class = []
    self.current_quartile = 0
    self.classified_market_data = False
    self.max_data_len = 1000000
    self.reset()
    return
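# Hypothetical usage of the strategy-configuration class above (the class
# name `strategy` and the override values are illustrative, not from the
# original source): defaults are set in __init__ and individual knobs are
# overridden before a run.
#
#     s = strategy()
#     s.input_file_name = "./datafeed/bcfeed_mtgoxUSD_1min.csv"
#     s.markup = 0.02      # require a larger profit per trade
#     s.stop_loss = 0.15   # tighter stop loss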
def __init__(self):
    self.cache = cache()
    self.cache_input = True
    self.cache_results = False
    self.cache_chart = False
    self.logs = logs()
    self.input_file_name = ""
    self.text_summary = ""  # text summary of the results
    self.input_data = []
    self.classification = []
    self.input_data_length = 0
    self.current_quartile = 0
    self.period = 0
    self.positions = []
    self.score_only = False  # set to true to only calculate what is required
                             # for scoring a strategy, to speed up performance
    self.max_length = 10000000
    self.enable_flash_crash_protection = False
    self.flash_crash_protection_delay = False
    self.reset()
    return
# Body of an async sender task (the enclosing "async def" header is not part
# of this snippet; send_by_time(loop), scheduled below, is the likely owner).
users = {}
loop = asyncio.get_event_loop()
users = await loop.run_in_executor(None, get_by_time, time)
print(users['category'])
if users['usernames'] != []:
    for i in range(len(users['usernames'])):
        print(users['language'][i], users['category'][i])
        try:
            await bot.send_message(users['usernames'][i],
                                   getCached(users['language'][i],
                                             users['category'][i]),
                                   disable_web_page_preview=True)
            # Pause briefly every 29 messages to stay under rate limits.
            if i % 29 == 0:
                await asyncio.sleep(0.03, loop=loop)
        except Exception:
            print("Error")
else:
    print('Error')
await asyncio.sleep(60, loop=loop)


loop = asyncio.get_event_loop()
loop.create_task(cache(loop))
loop.create_task(send_by_time(loop))
loop.create_task(send_lang(bot_language, loop))
loop.create_task(Adv.check_new_session(loop))

if __name__ == '__main__':
    executor.start_polling(dp, skip_updates=True)
def __init__(self, prob, param, y_):
    # TODO pass kernel prob.l, prob.x, param
    self.y = y_
    self.cache = cache(prob.l, param.cache_size)  # TODO
    self.QD = numpy.zeros(prob.l)
    for i in range(prob.l):
        self.QD[i] = self.kernel_function(i, i)