import pandas as pd
import numpy as np
import streamlit as st
import logging

from src.helper import create_logger

logger = create_logger('child', 'logs/Child.log', logging.DEBUG, logging.WARNING)


class Teacher_Knowledge:
    def __init__(self):
        self.data = None

    def childanalyzer(self):
        pass

    def get_features(self):
        # Column index of the underlying dataframe
        return self.data.columns

    def check_feature_length(self):
        # Number of columns currently held in self.data
        return len(self.data.columns)


class Child_Capabilities:
    def add_feature(self, new_feature: pd.DataFrame):
        """Append a single feature to the dataframe.

        Updates self.data
        """
        logger.debug(
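# Illustrative usage sketch (not part of the module above), assuming
# `self.data` holds a pandas DataFrame; the sample columns are made up
# purely for the example.
_tk = Teacher_Knowledge()
_tk.data = pd.DataFrame({"Open": [1.0, 2.0], "Close": [1.5, 2.5]})
_tk.get_features()          # Index(['Open', 'Close'], dtype='object')
_tk.check_feature_length()  # 2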
import logging

from src.main import main
from src.helper import create_logger

logger = create_logger('root', 'logs/root.log', logging.DEBUG, logging.WARNING)

if __name__ == "__main__":
    logger.info("Running the app")
    main()
    logger.info("Closing the app")
import pandas as pd
import streamlit as st
from itertools import repeat
import multiprocessing
from multiprocessing import Pool
import pickle
import logging

from src.helper import create_logger

logger = create_logger('Feature', 'logs/Feature.log', logging.DEBUG, logging.WARNING)


class Feature():
    """Feature class."""

    def __init__(self, name, requires=None):
        self.name = name
        # Avoid the shared-mutable-default pitfall of `requires=[]`
        self.requires = requires if requires is not None else []

    def indicator(self, *args):
        return 0

    def worker(self, tick):
        # fetch children
        loc = self.parent.fetch_child_by_name(tick)
        child = self.parent.children[loc[0]]
        # fetch required data
        data = child.data[self.requires]
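# Hedged sketch of a concrete indicator subclassing Feature, assuming
# indicator() is handed the columns named in `requires`. The class name,
# the 5-period window and the 'Close' column are illustrative assumptions,
# not the project's actual Custom_indicator.
class Moving_Average(Feature):
    def __init__(self):
        super().__init__('Moving_Average', requires=['Close'])

    def indicator(self, data):
        # Simple rolling mean over the required 'Close' column
        return data['Close'].rolling(5).mean()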
"""Home page shown when the user enters the application""" import streamlit as st import logging import awesome_streamlit as ast from datetime import datetime import os from src.eda.government import Government from src.helper import create_logger from src.features.features import Feature from src.features.indicators.custom_indicator import Custom_indicator logger = create_logger('viz', 'logs/Viz.log', logging.DEBUG, logging.WARNING) def fetch_tracker(tracker, gov): # Fetch the tracker and find it's location tracker = tracker.split() name = tracker[0] logger.debug({"Tracking Index": name}) start_date = datetime.strptime(tracker[2], '%Y-%m-%d') logger.debug({"Start Date": start_date}) end_date = datetime.strptime(tracker[4], '%Y-%m-%d') logger.debug({"End Date": end_date}) loc = gov.fetch_parent_by_nate(name, start_date, end_date) if len(loc) != 1: st.error("Issue while setting tracker") st.stop() return loc # pylint: disable=line-too-long
import statsmodels.api as sm
import numpy as np
import pandas as pd
from src.hedging.factors.momentum import Momentum
from src.hedging.factors.beta import Beta
from src.hedging.parser import Parser
from src.hedging.caller import Caller
import streamlit as st
import logging
from datetime import datetime
from src.eda.government import Government
from src.helper import create_logger

logger = create_logger('Hedging_Page', 'logs/Hedging.log', logging.DEBUG, logging.WARNING)


def fetch_tracker(tracker, gov):
    """Fetches the population the framework will focus upon.

    Args:
        tracker (str): tracker parameters
        gov (obj): Government object

    Returns:
        location of the tracker
    """
    # Fetch the tracker and find its location
    tracker = tracker.split()
    name = tracker[0]
    logger.debug({"Tracking Index": name})
import streamlit as st
from multiprocessing import Pool
import logging
import yfinance as yf
from src.helper import create_logger
from src.hedging.division import Division
import datetime
import importlib
import inspect
import os
import glob
import itertools
from glob import iglob
from os.path import basename, relpath, sep, splitext

logger = create_logger('Parser', 'logs/Hedging.log', logging.DEBUG, logging.WARNING)


class Parser():
    def __init__(self, caller):
        self.factor = []
        self.call = caller

    def import_plugins(self, *args, **kwargs):
        return 0

    # def find_all_factor(self):
    #     # Find factors in factors folder and their requirement
    #     # lis = os.listdir("src/hedging/factors/")
    #     self.factors = self.import_plugins(
    #         "src/hedging/factors/", create_instance=False, filter_abstract=False)
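# import_plugins() is stubbed out above (it just returns 0). The sketch below
# shows the kind of discovery it could perform with the modules already
# imported in this file, assuming each factor lives in its own module under
# src/hedging/factors/ and defines at least one class. The helper name and
# exact behaviour are assumptions, not the project's implementation.
def _discover_factor_classes(folder="src/hedging/factors/"):
    classes = []
    for path in iglob(os.path.join(folder, "*.py")):
        module_name = splitext(basename(path))[0]
        if module_name.startswith("_"):
            continue  # skip __init__.py and private modules
        dotted = relpath(path).replace(sep, ".")[:-len(".py")]
        module = importlib.import_module(dotted)
        # Keep only classes actually defined in that module
        classes.extend(
            cls for _, cls in inspect.getmembers(module, inspect.isclass)
            if cls.__module__ == module.__name__)
    return classes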
from src.eda.child import Child
import yfinance as yf
from os import listdir
import pickle
import logging
import os
import multiprocessing
from multiprocessing import Pool
from multiprocessing import Manager
from multiprocessing.sharedctypes import Array
import streamlit as st
import datetime
import shutil
import numpy as np

from src.helper import create_logger

logger = create_logger('parent', 'logs/Parent.log', logging.INFO, logging.WARNING)


class Shops():
    def __init__(self):
        self.start_date = None
        self.end_date = None

    def handle_null(self):
        pass

    def parser(self, name, ticker):
        """Take a name and ticker and download the data from yfinance."""
        if name in listdir(".temp/"):
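# Hedged sketch of the cache-then-download pattern parser() appears to use
# (its body is cut off above). The .temp/ file layout, the pickle format and
# the helper name are assumptions for illustration; yf.download() is the
# standard yfinance call.
def _load_or_download(name, ticker, start, end, cache_dir=".temp/"):
    path = os.path.join(cache_dir, name)
    if name in listdir(cache_dir):
        # Reuse previously downloaded data
        with open(path, "rb") as handle:
            return pickle.load(handle)
    data = yf.download(ticker, start=start, end=end)
    with open(path, "wb") as handle:
        pickle.dump(data, handle)
    return data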
"""This script contains code for the Goverment class and it's inherited properties """ import logging from src.eda.parent import Parent import streamlit as st import pickle import pandas as pd from src.helper import create_logger from src.eda.graphs import Graph logger = create_logger('process', 'logs/Government.log', logging.DEBUG, logging.WARNING) class Control_Population(): """Perform CRUD operations. """ def __init__(self): self.population = {} def add_parent(self, filename, start_date, end_date): """Adds Parent into Government controlled Population. Args: filename (str): name of Parent(Ticker Symbol) start_date (date): Start date end_date (date): [description] Returns:
import pandas as pd
import streamlit as st
from itertools import repeat
import multiprocessing
from multiprocessing import Pool
import pickle
import logging
import time

from src.helper import create_logger

logger = create_logger('Division', 'logs/Hedging.log', logging.DEBUG, logging.WARNING)


class Division():
    def __init__(self):
        self.parent = None

    def pull_range_of_time(self):
        # Keep only index values on/after the parent's start date
        self.range = []
        for x in self.parent.children[0].data.index:
            if x >= self.parent.start_date:
                self.range.append(x)

    def chunks(self, lst, n):
        """Split lst into successive n-sized chunks, keyed by chunk index."""
        chunk = {}
        counter = 0
        for i in range(0, len(lst), n):
            chunk[counter] = lst[i:i + n]
            counter += 1
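# Illustrative call to chunks() above, assuming the method finishes by
# returning the `chunk` dict (its return statement is cut off in this
# excerpt). With a 7-element list and n=3:
_div = Division()
_div.chunks(list(range(7)), 3)
# -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6]}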