class TasksModel:
    """Data-access model for tasks.

    Wraps the private inner peewee model ``__Model`` so callers interact with
    the ``tasks`` table only through this class's methods.
    """

    # once() memoizes db_connect so the connection is established a single time
    __connect = once(BaseModel.db_connect)

    def __init__(self):
        # connect and register the inner model (creates the table if needed)
        self.__connect([self.__Model])

    def query(self):
        """Return a peewee select query over all task rows."""
        q = self.__Model.select()
        return q

    def add(self, item):
        """Create and return a task row from a dict with 'title' and 'details' keys."""
        task = self.__Model.create(
            title=item['title'],
            # bug fix: was `description=`, which is not a field on __Model,
            # so the detail text was silently dropped instead of persisted
            details=item['details']
        )
        return task

    def remove(self, id):
        """Delete the task with the given primary key.

        Raises the model's DoesNotExist if no such row.
        """
        q = self.__Model.get(self.__Model.id == id)
        return q.delete_instance()  # returns number of rows deleted

    class __Model(BaseModel):
        """
        Tasks data model, kept as an inner class to limit its scope.
        The outer TasksModel methods provide all interaction with it.
        (Docstring previously said "Outbox" — copy-paste from another model.)
        """
        title = CharField(unique=True, column_name='task_title')
        # bug fix: removed stray leading comma that made this line a SyntaxError
        details = TextField(column_name='task_details')
        created_on = DateTimeField(default=datetime.datetime.now)

        class Meta:
            table_name = 'tasks'
def __init__(self, memory_spec, algorithm, body):
    '''Set up an episodic on-policy replay memory for one body.'''
    super(OnPolicyReplay, self).__init__(memory_spec, algorithm, body)
    # NOTE for on-policy replay, training frequency is per episode;
    # the frame-based memory classes below use per-frame frequency
    util.set_attr(self, self.agent_spec['algorithm'], ['training_frequency'])
    self.is_episodic = True
    # zero-capacity buffer, present only so the API matches other memories
    self.state_buffer = deque(maxlen=0)
    # lifetime experience counter; intentionally not cleared by reset()
    self.total_experiences = 0
    # log the size warning at most once to avoid spamming the log
    self.warn_size_once = ps.once(lambda m: logger.warn(m))
    self.reset()
def __init__(self, memory_spec, body):
    '''
    Base memory constructor.
    @param {*} body is the unit that stores its experience in this memory. Each body has a distinct memory.
    '''
    self.memory_spec = memory_spec
    self.body = body
    # basic state shared by every memory implementation
    self.last_state = None
    # running reward total; history accumulates over episodes
    self.total_reward = 0
    # zero-capacity placeholder for API consistency; concrete memory
    # classes reset this to their own max_len
    self.state_buffer = deque(maxlen=0)
    # emit the size warning only once so the log is not spammed
    self.warn_size_once = ps.once(lambda m: logger.warn(m))
    # the data keys this memory stores per experience
    self.data_keys = ['states', 'actions', 'rewards', 'next_states', 'dones', 'priorities']
import os
import colorlover as cl
import pydash as ps
# The data visualization module
# Defines plotting methods for analysis
from plotly import graph_objs as go, io as pio, tools
from plotly.offline import init_notebook_mode, iplot
from convlab.lib import logger, util

logger = logger.get_logger(__name__)

# warn orca failure only once
# NOTE(review): the handler accepts `e` but the message never includes it,
# so the actual failure cause is dropped — consider appending {e} to the f-string
orca_warn_once = ps.once(lambda e: logger.warning(
    f'Failed to generate graph. Run retro-analysis to generate graphs later.'))
if util.is_jupyter():
    # enable inline plotly rendering when running inside a notebook
    init_notebook_mode(connected=True)


def create_label(y_col, x_col, title=None, y_title=None, x_title=None, legend_name=None):
    '''Create label dict for go.Layout with smart resolution'''
    legend_name = legend_name or y_col
    # normalize all three to lists so single names and name-lists are handled alike
    y_col_list, x_col_list, legend_name_list = ps.map_(
        [y_col, x_col, legend_name], util.cast_list)
    y_title = str(y_title or ','.join(y_col_list))
def test_once(case, arglist, expected):
    """Check that a once-wrapped callable yields `expected` for every arg tuple."""
    for args in arglist:
        # bug fix: the comparison result was computed and discarded, so this
        # test could never fail; assert it so a mismatch is reported
        assert _.once(case)(*args) == expected
def __init__(self, access_token: dict, profile_factory: Callable[[], Any]):
    """Hold a defensive copy of the token and a once-wrapped profile factory."""
    # wrap the factory so it executes at most a single time
    self._profile_factory = pydash.once(profile_factory)
    # deep-copy so later mutation of the caller's dict cannot affect us
    self._access_token = copy.deepcopy(access_token)
# Defines plotting methods for analysis from glob import glob from plotly import graph_objs as go, io as pio, tools from plotly.offline import init_notebook_mode, iplot from slm_lab.lib import logger, util import colorlover as cl import os import pydash as ps logger = logger.get_logger(__name__) # moving-average window size for plotting PLOT_MA_WINDOW = 100 # warn orca failure only once orca_warn_once = ps.once(lambda e: logger.warning( f'Failed to generate graph. Run retro-analysis to generate graphs later. {e}\nIf running on a headless server, prepend your Python command with `xvfb-run -a `, for example `xvfb-run -a python run_lab.py`' )) if util.is_jupyter(): init_notebook_mode(connected=True) def calc_sr_ma(sr): '''Calculate the moving-average of a series to be plotted''' return sr.rolling(PLOT_MA_WINDOW, min_periods=1).mean() def create_label(y_col, x_col, title=None, y_title=None, x_title=None,