Example #1
0
        if daily_stock_quote:
            for k in DailyStockQuote.fields.keys():
                if k in OptionEffect.fields:
                    self.__set(k, daily_stock_quote.get(k))

    def set(self, key, value):
        """Disabled public mutator.

        Values are assigned only through the private ``__set`` path during
        construction; any external call is a programming error.

        Raises:
            RuntimeError: always.
        """
        raise RuntimeError("Should not be used")

    def __set(self, key, value):
        """Internal setter: store ``value`` under ``key`` in ``self._values``.

        Only keys declared in ``OptionEffect.fields`` are accepted.

        Raises:
            KeyError: if ``key`` is not a declared field.
        """
        if key in OptionEffect.fields:
            self._values[key] = value
        else:
            raise KeyError(f"Invalid key={key}")


if __name__ == '__main__':
    # Manual smoke test: scrape one live option quote and one stock quote,
    # merge them into an OptionEffect, and dump the result as JSON.
    metadata_dir = setup_metadata_dir()
    setup_logger(__file__)
    logger.setLevel(logging.DEBUG)
    logging.getLogger("web_chrome_driver").setLevel(logging.DEBUG)
    with ChromeDriver() as browser:
        fetched_option = read_daily_option_quote(
            browser,
            "AMD",
            "Call",
            60.0,
            20220121,
            use_barchart=True,
        )
        fetched_stock = read_stock_quote(browser, "AMD")
        effect = OptionEffect(daily_option_quote=fetched_option,
                              daily_stock_quote=fetched_stock)
        print(json.dumps(effect.__dict__, indent=4))
Example #2
0
# from scipy.misc import imresize
# from tensorboardX import SummaryWriter
# import warnings
from sklearn.metrics import roc_auc_score
from sklearn.metrics import average_precision_score
import cv2

from utils_logging import setup_logger
from models.__init__ import save_checkpoint, resume_checkpoint

from parse_inputs import parse_inputs
# Parse command-line arguments once at import time; `args` is then shared by
# the rest of this module (NOTE(review): import-time side effect — any import
# of this module triggers argument parsing).
args = parse_inputs()

# Module-wide logger writing to ./logs/<args.log_file>; `verbose=True`
# presumably also echoes to the console — confirm in setup_logger.
logger = setup_logger(name='first_logger',
                      log_dir='./logs/',
                      log_file=args.log_file,
                      log_format='%(asctime)s %(levelname)s %(message)s',
                      verbose=True)


def main():
    # transform = _get_transform(args.input_resolution)

    # Prepare data
    print("Loading Data")

    batch_size = args.batch_size
    train_set = GazeDataset(args.train_dir, args.train_annotation, 'train')
    train_data_loader = DataLoader(dataset=train_set,
                                   batch_size=batch_size,
                                   shuffle=False,