コード例 #1
0
def automate():
	"""Drive the stock-data preparation pipeline end to end.

	Each stage runs only when its output directory is still empty:
	extract the zip archives, filter the raw files, then split the
	combined data into one sorted CSV per stock symbol.

	Raises (via logger.raise_exception) when no .zip input exists.
	"""
	number_of_zipped_files = len(get_all_files_in_directory(zipped_data_path))
	number_of_raw_files = len(get_all_files_in_directory(raw_data_path))
	number_of_filtered_files = len(get_all_files_in_directory(filtered_data_path))
	number_of_individual_files = len(get_all_files_in_directory(individual_data_path))

	if number_of_zipped_files == 0:
		logger.raise_exception('There are no .zip files to work with.', logger.LogicError)

	if number_of_raw_files == 0:
		extract_zip_files()

	if number_of_filtered_files == 0:
		filter_the_stock_files(raw_data_path, filtered_data_path)

	if number_of_individual_files == 0:
		combined_data_frame = load_all_stock_files()

		all_unique_names = combined_data_frame.symbol.values

		timer = Timer()

		for n in all_unique_names:
			timer.start_timer()
			# Take an explicit copy: the original mutated a boolean-mask view
			# with inplace drop, which triggers SettingWithCopyWarning and is
			# not guaranteed to behave as intended.
			local_data_frame = combined_data_frame[combined_data_frame.symbol == n].copy()

			if not local_data_frame.empty:
				local_stock_path = str(individual_data_path) + n + '.csv'
				if not os.path.isfile(local_stock_path):
					# Drop the leftover positional index column, then store
					# the rows in chronological order.
					local_data_frame = local_data_frame.drop(local_data_frame.columns[0], axis=1)
					local_data_frame['date'] = pd.to_datetime(local_data_frame['date'])
					local_data_frame = local_data_frame.sort_values(by='date')
					create_csv_file(local_data_frame, local_stock_path)

			print('Length of local data frame: ' + str(len(local_data_frame)) + '\tLength of combined data: ' + str(len(combined_data_frame)))

			# Shrink the combined frame so the next symbol's mask scans less data.
			combined_data_frame = combined_data_frame[combined_data_frame.symbol != n]

			timer.end_timer()
			timer.print_time()
コード例 #2
0
def code_to_test_01():
    """Time membership tests over a list (O(n) scan) vs a set (O(1) hash lookup).

    Prints two timings via the project Timer; the summed values are identical,
    only the container type differs.
    """
    # Build the containers directly instead of a manual append loop.
    my_list = list(range(10000))
    my_set = set(my_list)

    timer = Timer()
    timer.start_timer()
    z = 0
    for y in range(10000):
        # Use the `in` operator, never call __contains__ directly.
        if y in my_list:
            z += y
    timer.end_timer()
    timer.print_time()

    timer.start_timer()
    z = 0
    for y in range(10000):
        if y in my_set:
            z += y
    timer.end_timer()
    timer.print_time()
コード例 #3
0
ファイル: carico.py プロジェクト: utarsuno/GenericProject
    # NOTE(review): this span is the body of a per-stock loop whose header lies
    # outside this view; `un`, `single_data_frame`, `all_data`, `inner_timer`,
    # `simulation`, `splits`, `all_earnings` and `bad_stocks` are bound there.
    # Look up recorded splits by the bare symbol ('&' and '.csv' stripped).
    split_list = splits.get_splits_for_stock(un.replace('&', '').replace('.csv', ''))

    if len(split_list) != 0:
    	single_data_frame = perform_splits_on_stock(split_list, single_data_frame)

    # Only keep earnings for stocks where the simulation actually made trades;
    # the rest are remembered so they can be reported at the end.
    inner_earnings = simulation.run_simulation_for_single_stock(single_data_frame)
    if inner_earnings.made_trades():
        all_earnings.append(inner_earnings)
    else:
        bad_stocks.append(un)

    # We are finished using the information for that particular stock so we will remove it from the combined data frame.
    all_data = all_data[all_data.symbol != un]

    inner_timer.end_timer()
    inner_timer.print_time()

timer.end_timer()
timer.print_time()


# Report every earnings record, lowest combined daily profit first.
all_earnings.sort(key=lambda earning: earning.combined_daily)

for single_earning in all_earnings:
    print(single_earning)

logger.log('Bad stocks:')
for bad_stock in bad_stocks:
    print(bad_stock)
コード例 #4
0
ファイル: main.py プロジェクト: utarsuno/GenericProject
timer = Timer()

#gc.disable()

# Run the simple simulation over every per-stock CSV, timing each iteration.
for stock_file_name in dm.get_all_files_in_directory(dm.individual_data_path):

	timer.start_timer()

	stock_file_path = dm.individual_data_path + stock_file_name
	print(stock_file_path)

	# The per-stock CSVs carry no header row; read the first five columns
	# and label them explicitly.
	stock_frame = pd.read_csv(stock_file_path, header=None, usecols=[0, 1, 2, 3, 4])
	stock_frame.columns = ['date', 'open', 'high', 'low', 'close']
	stock_frame['date'] = pd.to_datetime(stock_frame['date'])

	# Apply any recorded splits, looked up by the bare ticker symbol.
	bare_symbol = stock_file_name.replace('&', '').replace('.csv', '')
	stock_splits = splits.get_splits_for_stock(bare_symbol)
	if len(stock_splits) != 0:
		stock_frame = perform_splits_on_stock(stock_splits, stock_frame)

	profits.append(sim.run_simple_simulation_for_single_stock(stock_file_name, stock_frame))

	timer.end_timer()
	timer.print_time()

#gc.enable()

# Present results worst total profit first.
profits.sort(key=lambda result: result.total_profit)

for result in profits:
	print(result)