def test_merge_requires_dicts():
    """Merge must raise AssertionError when its arguments are not dicts."""
    try:
        merge('foo', 'bar')
    except AssertionError:
        # Expected: merge asserts that every argument is a dictionary.
        return
    assert False, "merge() should reject non-dict arguments"
def test_merge_raises_exception_with_one_argument():
    """Merge must raise AssertionError when called with only one argument."""
    try:
        # NOTE(review): {'foo', 'bar'} is a set literal, i.e. a single
        # non-dict argument — presumably intentional, verify against merge().
        merge({'foo', 'bar'})
    except AssertionError:
        # Expected: merge asserts it received at least two dicts.
        return
    assert False, "merge() should reject a single argument"
def test_merge_raises_exception_with_no_arguments():
    """Merge must raise AssertionError when called with no arguments."""
    try:
        merge()
    except AssertionError:
        # Expected: merge asserts it received at least two dicts.
        return
    assert False, "merge() should reject an empty argument list"
def sample(FLAGS, sess):
    """Generate a grid of generator samples and save it as a PNG.

    Restores the latest checkpoint from FLAGS.logdir, feeds random z and a
    2-D sweep over the first two latent-code dimensions, and writes the
    resulting image grid under samples/<checkpoint>.png.

    Parameters:
        FLAGS: object with a `logdir` attribute locating the checkpoints.
        sess: active tf.Session used to restore variables and run the model.
    """
    # Model
    z = tf.placeholder(tf.float32, shape=[None, Z_DIM])
    latent_c = tf.placeholder(shape=[None, C_DIM], dtype=tf.float32)
    g_model = generator(z, latent_c)

    # Restore only the most recent checkpoint. (The original also globbed
    # logdir for checkpoint-* files but immediately overwrote that list.)
    saver = tf.train.Saver()
    checkpoints = [tf.train.latest_checkpoint(FLAGS.logdir)]
    for checkpoint in checkpoints:
        saver.restore(sess, checkpoint)

        # Output file named after the checkpoint.
        output = "samples/%s.png" % os.path.basename(checkpoint)
        samples = 144
        # int() is required: math.sqrt returns a float, but width is used as
        # a count for np.linspace/np.tile and as grid dimensions in merge().
        width = int(math.sqrt(samples))

        # Inputs: random z, latent codes sweeping dims 0 and 1 over a grid.
        z_batch = np.random.uniform(
            -1.0, 1.0, size=[samples, Z_DIM]).astype(np.float32)
        c_batch = np.zeros((samples, C_DIM))
        c_batch[:, 0] = np.tile(np.linspace(-1, 1, width), width)
        c_batch[:, 1] = np.repeat(np.linspace(-1, 1, width), width)

        # Run the generator and save the image grid.
        images = sess.run(g_model, feed_dict={z: z_batch, latent_c: c_batch})
        images = np.reshape(
            images,
            [samples, IMAGE_SIZE['resized'][0], IMAGE_SIZE['resized'][1], 3])
        # Map generator output from [-1, 1] (presumably tanh) to [0, 1].
        images = (images + 1.) / 2.
        scipy.misc.imsave(output, merge(images, [width] * 2))
def enqeue_move(state, move):
    """Return a new state with `move` appended to its move list.

    Invalid moves return the original state unchanged. The input state is
    not mutated: a fresh moves list is built before merging.

    Note: the misspelled name "enqeue" is kept for caller compatibility.
    """
    if not is_valid_move(state, move):
        return state
    # Build a new list instead of extending state["moves"] in place, which
    # mutated the caller's state as a side effect in the original.
    new_moves = state["moves"] + [move]
    return helpers.merge(state, {"moves": new_moves})
def test_merge_scraps_duplicates_in_lists():
    """Merging dicts with overlapping lists keeps each element once, in order."""
    dict_1 = {'list': ['foo', 'bar']}
    dict_2 = {'list': ['bar', 'far']}
    merged = merge(dict_1, dict_2)
    # Plain list equality is sufficient for string lists;
    # numpy.array_equal was an unnecessary dependency here.
    assert list(merged['list']) == ['foo', 'bar', 'far']
def test_merge_accepts_lists():
    """Merging dicts with disjoint lists concatenates them in order."""
    dict_1 = {'list': ['foo', 'bar']}
    dict_2 = {'list': ['boo', 'far']}
    merged = merge(dict_1, dict_2)
    # Plain list equality is sufficient for string lists;
    # numpy.array_equal was an unnecessary dependency here.
    assert list(merged['list']) == ['foo', 'bar', 'boo', 'far']
def get(self, paths, flat=True):
    """Load self.filename as YAML and return the values at the given paths.

    Parameters:
        paths: iterable of path strings; each is looked up in the loaded
            document via search() and merged into the output. A path that
            is empty after stripping '/' short-circuits and returns the
            whole document instead.
        flat: when True (default), results are flattened before returning.

    Returns:
        The merged search results (flattened if `flat`), the whole document
        for an empty path, or {} when the document is not iterable.
    """
    try:
        output = {}
        with open(self.filename, 'rb') as f:
            l = yaml.safe_load(f.read())
        for path in paths:
            if path.strip('/'):
                # Non-empty path: merge its search result into the output.
                output = merge(output, search(l, path))
            else:
                # Empty path means "everything": return the whole document.
                return flatten(l) if flat else l
        return flatten(output) if flat else output
    except IOError as e:
        print(e, file=sys.stderr)
        if e.errno == 2:
            # errno 2 == ENOENT: the state file does not exist yet.
            print("Please, run init before doing plan!")
            sys.exit(1)
    except TypeError as e:
        # NOTE(review): fragile — matches the exception message text to
        # treat non-iterable documents as empty; verify against callers.
        if 'object is not iterable' in e.args[0]:
            return dict()
        raise
def test_merge_with_deep_does_not_use_pointers():
    """Merged nested dicts must be fresh objects, not references to inputs."""
    left = {'deep': {'nested': {'structure': True}}}
    right = {'deep': {'nested': {'structure': False}}}
    result = merge(left, right)
    # The merged inner dict may not alias either input's inner dict.
    for source in (left, right):
        assert result['deep']['nested'] is not source['deep']['nested']
def simulate(base2014, base2015, base2016):
    """Simulate one year of EMA-crossover trading over the 2016 data.

    Parameters:
        base2014, base2015: historical data dicts keyed by ticker; merged to
            seed the price history the EMAs are computed from.
        base2016: dict keyed by ticker; each value is the sequence of daily
            data the simulation steps through.

    Returns:
        dict keyed by ticker with 'orders', 'weight', 'short_averages' and
        'long_averages' per stock, plus a 'total' entry summing all weights.
    """
    full_base = helpers.merge(base2014, base2015)
    result = {stock: {'orders': [], 'weight': 1.0 / len(STOCKS)}
              for stock in STOCKS}

    # Start simulation
    for ticker, data2016 in base2016.items():
        stock_data = full_base[ticker]  # data from 2014/2015 for this ticker
        short_averages = []
        long_averages = []
        variances = []
        # Materialize as a list: the history is appended to below, and a
        # Python 3 map() object supports neither append nor re-iteration.
        closing_prices = [x.closing_price for x in stock_data]
        for data in data2016:
            variances.append(compute_variance(closing_prices))
            # Short and long EMAs over the history seen so far.
            short_averages.append(ema(closing_prices, EMA_SHORT))
            long_averages.append(ema(closing_prices, EMA_LONG))
            # Extend the history for the next iteration.
            closing_prices.append(data.closing_price)
            # Decide based on the short/long EMA crossover.
            c = choice(short_averages, long_averages)
            if c == 1:
                # Choice 1: place a buy order.
                result[ticker]['orders'].append(
                    order.Order(data, order.BUY_ORDER_TYPE))
            elif c == -1:
                # Choice -1: place a sell order.
                result[ticker]['orders'].append(
                    order.Order(data, order.SELL_ORDER_TYPE))
        result[ticker]['short_averages'] = short_averages
        result[ticker]['long_averages'] = long_averages

    # Pair consecutive (buy, sell) orders and compound each weight.
    for ticker in result.keys():
        orders = result[ticker]['orders']
        for i in range(1, len(orders), 2):
            buy_order = orders[i - 1]
            sell_order = orders[i]
            result[ticker]['weight'] = result[ticker]['weight'] + \
                result[ticker]['weight'] * \
                compute_gain_or_lost(buy_order, sell_order)
        # Odd order count: if the dangling order is a sell, settle it
        # against the preceding buy.
        if len(orders) % 2 != 0:
            last_order = orders[-1]
            if last_order.type == order.SELL_ORDER_TYPE:
                buy_order = orders[-2]
                result[ticker]['weight'] = result[ticker]['weight'] + \
                    result[ticker]['weight'] * \
                    compute_gain_or_lost(buy_order, last_order)

    # Total portfolio weight (renamed to avoid shadowing the builtin sum).
    total = 0
    for ticker in result.keys():
        total += result[ticker]['weight']
    result['total'] = total
    return result
def simulate(base2014, base2015, base2016):
    """Simulate one year of SMA-band trading over the 2016 data.

    Parameters:
        base2014, base2015: historical data dicts keyed by ticker; merged to
            seed the price history the SMAs are computed from.
        base2016: dict keyed by ticker; each value is the sequence of daily
            data the simulation steps through.

    Returns:
        dict keyed by ticker with 'orders' and 'weight' per stock, plus a
        'total' entry summing all weights.
    """
    full_base = helpers.merge(base2014, base2015)
    result = {stock: {'orders': [], 'weight': 1.0 / len(STOCKS)}
              for stock in STOCKS}

    # Start simulation
    for ticker, data2016 in base2016.items():
        stock_data = full_base[ticker]  # data from 2014/2015 for this ticker
        max_average = []
        min_average = []
        for data in data2016:
            # Rebuilt every day because stock_data grows below. Lists (not
            # Python 3 map objects) are required: sma() iterates them and
            # closing_prices is indexed with [-1].
            closing_prices = [x.closing_price for x in stock_data]
            max_prices = [x.max_price for x in stock_data]
            min_prices = [x.min_price for x in stock_data]
            # SMAs of the daily highs and lows.
            max_average.append(sma(max_prices, PERIOD))
            min_average.append(sma(min_prices, PERIOD))
            # Decide from the SMA band and the latest closing price.
            c = choice(max_average, min_average, closing_prices[-1])
            stock_data.append(data)
            if c == 1:
                # Choice 1: place a buy order.
                result[ticker]['orders'].append(
                    order.Order(data, order.BUY_ORDER_TYPE))
            elif c == -1:
                # Choice -1: place a sell order.
                result[ticker]['orders'].append(
                    order.Order(data, order.SELL_ORDER_TYPE))

    # Pair consecutive (buy, sell) orders and compound each weight.
    for ticker in result.keys():
        orders = result[ticker]['orders']
        for i in range(1, len(orders), 2):
            buy_order = orders[i - 1]
            sell_order = orders[i]
            result[ticker]['weight'] = result[ticker]['weight'] + \
                result[ticker]['weight'] * \
                compute_gain_or_lost(buy_order, sell_order)
        # Odd order count: if the dangling order is a sell, settle it
        # against the preceding buy.
        if len(orders) % 2 != 0:
            last_order = orders[-1]
            if last_order.type == order.SELL_ORDER_TYPE:
                buy_order = orders[-2]
                result[ticker]['weight'] = result[ticker]['weight'] + \
                    result[ticker]['weight'] * \
                    compute_gain_or_lost(buy_order, last_order)

    # Total portfolio weight (renamed to avoid shadowing the builtin sum).
    total = 0
    for ticker in result.keys():
        total += result[ticker]['weight']
    result['total'] = total
    return result
def test_merge_with_deep_dicts():
    """Merging a nested dict with an unrelated flat dict keeps the nesting."""
    nested = {'deep': {'nested': {'structure': True}}}
    flat = {'foo': 'foo'}
    result = merge(nested, flat)
    assert result['deep']['nested']['structure']
def test_merge_with_shallow_dicts_not_shared_keys():
    """Merging shallow dicts with disjoint keys keeps every key/value pair."""
    result = merge({'foo': 'bar'}, {'bar': 'foo'})
    for key, expected in (('foo', 'bar'), ('bar', 'foo')):
        assert result[key] == expected
def simulate(base2014, base2015, base2016):
    """Run the day-by-day simulation of the whole year 2016.

    Parameters:
        base2014: 2014 data for the selected companies.
        base2015: 2015 data for the selected companies.
        base2016: 2016 data for the selected companies; it also drives the
            sequence of trading dates.

    Returns:
        result: per-ticker orders and final weights, plus a 'total' entry
            with the summed portfolio weight at year end.
    """
    old_base = helpers.merge(base2014, base2015)
    main_data = {ticker: data2016 for ticker, data2016 in base2016.items()}
    # list() is required: dict.keys() is a view in Python 3 and does not
    # support indexing (ticker_list[0] below).
    ticker_list = list(main_data.keys())
    days = len(main_data[ticker_list[0]])

    result = {stock: {'orders': [], 'weight': 1.0 / len(STOCKS)}
              for stock in STOCKS}
    daily_choice = {ticker: None for ticker in ticker_list}
    last_ops = {ticker: {"buy": None, "sell": None} for ticker in ticker_list}
    short_averages = {ticker: [] for ticker in ticker_list}
    long_averages = {ticker: [] for ticker in ticker_list}
    # Seed each ticker's price history from the 2014/2015 data as concrete
    # lists (they are appended to daily). The original reassigned the same
    # value inside a redundant nested day loop; one pass suffices.
    closing_prices = {
        ticker: [x.closing_price for x in old_base[ticker]]
        for ticker in ticker_list
    }

    # Begin Simulation -------------------------------------------------------
    for day in range(days):
        print("\nDay {0} {1} {2}".format(
            day + 1, 43 * "--", main_data[ticker_list[0]][day].date))
        for ticker in ticker_list:
            # Moving averages over the history seen so far, then extend the
            # history with today's close and record the decision.
            short_averages[ticker].append(
                mma(closing_prices[ticker], MMA_SHORT))
            long_averages[ticker].append(
                mma(closing_prices[ticker], MMA_LONG))
            closing_prices[ticker].append(
                main_data[ticker][day].closing_price)
            daily_choice[ticker] = choice(
                short_averages[ticker], long_averages[ticker])

        print("Decisions Taken")
        total_daily_result = 0
        for ticker in ticker_list:
            if daily_choice[ticker] == 2:
                # Sell: realize gain/loss against the last recorded buy.
                # NOTE(review): assumes a buy always precedes a sell
                # decision; last_ops[ticker]["buy"] is None otherwise.
                last_ops[ticker]["sell"] = main_data[ticker][day]
                weight_balance = result[ticker]["weight"] * \
                    compute_gain_or_lost(
                        last_ops[ticker]["buy"], last_ops[ticker]["sell"])
                result[ticker]["weight"] = \
                    result[ticker]["weight"] + weight_balance
                total_daily_result += weight_balance
                print("Sell {0} | Gain/Loss = {1}".format(
                    ticker, weight_balance))
                last_ops[ticker]["buy"] = None
            elif daily_choice[ticker] == 1:
                # Buy: remember today's data as the open position.
                print("Buy {0}".format(ticker))
                last_ops[ticker]["buy"] = main_data[ticker][day]
                last_ops[ticker]["sell"] = None
            elif daily_choice[ticker] == 0:
                print("Keep {0}".format(ticker))
        print("{0}".format(105 * "-"))

        print("Daily Total = {0}".format(total_daily_result))
        print("{0}".format(105 * "-"))
        # Running total of all weights (avoid shadowing the builtin sum).
        current_total = 0
        for ticker in ticker_list:
            current_total += result[ticker]["weight"]
        print("Current Total Weight: {0}".format(current_total))
        print("{0}\n".format(105 * "-"))

    # Final portfolio weight.
    total_weight = 0
    for ticker in ticker_list:
        total_weight += result[ticker]["weight"]
    result["total"] = total_weight
    return result
df_test = df_test.drop(['Dia_Servei'], axis=1) # Combine columns df_train.Unitats_Demanades = (df_train.Unitats_Demanades - df_train.Unitats_Servides) * df_train.UxC df_train = df_train.rename(columns={'Unitats_Demanades': 'Unitats_Perdudes'}) df_train = df_train.drop(['Unitats_Servides', 'UxC'], axis=1) df_test.Unitats_Demanades = (df_test.Unitats_Demanades - df_test.Unitats_Servides) * df_test.UxC df_test = df_test.rename(columns={'Unitats_Demanades': 'Unitats_Perdudes'}) df_test = df_test.drop(['Unitats_Servides', 'UxC'], axis=1) ======= df['week'] = map(lambda e: (e.year - min_date.year) * 54 + e.isocalendar()[1], df.index.values) # TODO: min 51 df = df[df['week'] > df['week'].max() - PAST_TIMESTEPS] df['week'] = map(lambda x: df['week'].max()-x) df = df.group_by('Client').apply(lambda x: merge(x, PAST_TIMESTEPS)) print(df) >>>>>>> Stashed changes ##### scaled = scale_data(df_train) trainset = scaled # series_to_supervised(scaled, PAST_TIMESTEPS, FUTURE_TIMESTEPS) scaled = scale_data(df_test) testset = scaled # series_to_supervised(scaled, PAST_TIMESTEPS, FUTURE_TIMESTEPS) x_train, y_train, x_test, y_test = split_dataset(trainset, testset, PAST_TIMESTEPS, FUTURE_TIMESTEPS) predictor = build_predictor((x_train.shape[1], x_train.shape[2])) fit_predictor(predictor, x_train, y_train, x_test, y_test, EPOCHS, BATCH_SIZE)