def __init__(self, scope, q, alpha, reward, discount, quotes, bankroll, log=None):
    """Initialize the trading agent: wire up the Indicators, Learning and
    Order mixins, then reset trade-tracking state.

    Parameters
    ----------
    scope : trading scope forwarded to ``Order.__init__``.
    q, alpha, reward, discount : Q-learning hyperparameters forwarded to
        ``Learning.__init__``.
    quotes : market quote source, kept on the instance.
    bankroll : starting capital forwarded to ``Order.__init__``.
    log : optional logger shared with all mixins.
    """
    # Fix: the original assigned ``self.logger = log`` twice; once is enough.
    self.logger = log
    self.actions = ACTIONS  # BUY, SELL, DO_NOTHING
    Indicators.__init__(self, log)
    # NOTE(review): Learning reads self.state here — presumably initialized
    # by Indicators.__init__ above; confirm against that class.
    Learning.__init__(self, q, alpha, reward, discount, self.state,
                      self.actions)
    Order.__init__(self, scope, bankroll, log)
    self.num_trades = 0
    self.performance = 1
    # Volume never drops below 1 even if performance is later non-positive.
    self.volume = max(self.performance, 1)
    self.status = {'status': '', 'action': ''}
    self.quotes = quotes
    self.states = None
def __init__(self, scope, q, alpha, reward, discount, quotes, bankroll, log=None):
    """Initialize the trading agent: wire up the Indicators, Order and
    Learning mixins, then reset trade-tracking state.

    Parameters
    ----------
    scope : trading scope, stored on the instance and forwarded to
        ``Order.__init__``.
    q, alpha, reward, discount : Q-learning hyperparameters forwarded to
        ``Learning.__init__``.
    quotes : market quote source, kept on the instance.
    bankroll : starting capital forwarded to ``Order.__init__``.
    log : optional logger shared with all mixins.
    """
    # Fix: the original assigned ``self.logger = log`` twice; once is enough.
    self.logger = log
    self.scope = scope
    self.actions = ACTIONS
    Indicators.__init__(self, log)
    Order.__init__(self, scope, bankroll, log)
    # NOTE(review): Learning reads self.state here — presumably initialized
    # by Indicators.__init__ or Order.__init__ above; confirm.
    Learning.__init__(self, q, alpha, reward, discount, self.state,
                      self.actions)
    self.num_trades = 0
    self.performance = 1
    # Volume never drops below 1 even if performance is later non-positive.
    self.volume = max(self.performance, 1)
    self.status = {'status': 'idle', 'action': ''}
    self.quotes = quotes
    self.states = None
def __init__(self, args):
    """Set up output directories, forward CLI arguments to ``Learning``,
    and open a TensorFlow 1.x interactive session on a fresh default graph."""
    # Fixed locations for checkpoints, TF event logs and evaluation output.
    self.checkpoint_dir = '/data/tf/ckpts'
    self.log_dir = '/data/tf/logs'
    self.result_dir = 'results'
    # Attributes identifying an experiment run ('gan' and 'parse' excluded).
    self._attrs = ['generator', 'rule', 'rule_apply', 'order', 'subsample', 'ratio']
    Learning.__init__(
        self,
        model=args.model, dataset=args.dataset,
        model_path=args.model_path, data_path=args.data_path,
        generator=args.generator, rule=args.rule,
        gan=args.gan, parse=args.parse,
        rule_apply=args.rule_apply, order=args.order,
        subsample=args.subsample, ratio=args.ratio,
        reload=args.reload, beam=args.beam,
    )
    # Start from a clean graph; cap op parallelism and grow GPU memory lazily.
    tf.reset_default_graph()
    session_config = tf.ConfigProto(
        intra_op_parallelism_threads=15,
        inter_op_parallelism_threads=15,
    )
    session_config.gpu_options.allow_growth = True
    self.sess = tf.InteractiveSession(config=session_config)