Example 1
def getinput(obj, parent, needs, callback, **k):
    diag = None
    # 'needs' may be a dialog class exposing a Prompt method...
    if hasattr(needs, 'Prompt'):
        diag = needs(parent, obj)

    # ...or an '@'-prefixed dotted import path that resolves to one.
    elif isinstance(needs, basestring) and needs.startswith('@'):
        diag = import_function(needs[1:])(parent, obj)

    if diag is not None:
        diag.Prompt(callback)
        diag.Destroy()
    else:
        # Otherwise 'needs' is (or produces) a field specification.
        if callable(needs):
            needs = needs(obj)

        from pprint import pprint
        pprint(needs)  # debug output of the requested fields

        # A single string field gets a plain text-entry prompt.
        if len(needs) == 1 and issubclass(needs[0][0], basestring):
            type, name, default = needs[0]
            val = GetTextFromUser(name, caption=name, default_value=default)
            if val is not None:
                return callback(val)
            return

        # Anything more elaborate is rendered as a full form.
        FormFrame(obj, parent, needs, callback, **k)
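As a quick illustration of the three paths through getinput above (the names obj, parent, MyPromptDialog and on_done below are hypothetical, not from the source):

# Hypothetical callers; only getinput itself comes from the example above.
def on_done(value):
    print('user entered: %r' % (value,))

# 1) A single text field spec (type, label, default) -> plain text prompt.
getinput(obj, parent, [(str, 'Nickname', 'guest')], on_done)

# 2) A dialog class exposing Prompt(callback) and Destroy().
getinput(obj, parent, MyPromptDialog, on_done)

# 3) An '@'-prefixed dotted path, resolved lazily via import_function.
getinput(obj, parent, '@myapp.dialogs.RenameDialog', on_done)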
Example 2
def lazy_call(handler, *args):
    # Accept either a callable or a dotted import path naming one.
    if not hasattr(handler, '__call__'):
        assert isinstance(handler, basestring)
        from util import import_function
        handler = import_function(handler)
        assert hasattr(handler, '__call__')

    return handler(*args)
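All of these examples delegate string-to-object resolution to an import_function helper from the project's util module. Its implementation is not shown here; a minimal sketch of such a resolver, assuming a plain 'package.module.attr' dotted path, might look like:

import importlib

def import_function(path):
    # Resolve a dotted path such as 'package.module.attr' to the named object.
    # (Illustrative sketch only; the project's util.import_function may differ.)
    module_path, _, attr = path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attr)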
Example 3
    def panel_for_tab(self, i):
        'Returns the preference panel for the ith tab.'

        module_name = tabnames[i][0]

        if module_name not in self.loaded_panels:
            log.info('loading panel "%s"', module_name)
            func = import_function('gui.pref.pg_%s.panel' % module_name)
            panel = self._construct_sub_panel(func)
            self.loaded_panels[module_name] = panel

        return self.loaded_panels[module_name]
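The pattern above imports each preference page lazily and caches the constructed panel, so revisiting a tab does not rebuild it. For illustration only (the real tabnames structure is not shown in the source), the lookup is equivalent to something like:

# Hypothetical tab table; only the first element of each entry is used here.
tabnames = [('general', 'General'), ('appearance', 'Appearance')]

def module_for_tab(i):
    # Tab i maps to a module named gui.pref.pg_<name>.
    return 'gui.pref.pg_%s' % tabnames[i][0]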
Example 4
    def add_account_gui(self, account_gui):
        '''
        Adds account specific GUI to the "extended" section.

        account_gui must be a dotted string import path to a function
        which returns a GUI component, and will be called with two
        arguments: this dialog, and the account we're editing/creating.
        '''

        log.info('loading account GUI from %r', account_gui)
        self.details_panel = import_function(account_gui)(self, self.account)
        self.details.add(self.details_panel)
        self.info_callbacks += lambda info: info.update(self.details_panel.info())

        self.Sizer.Add(self.details_panel, 0, EXPAND | ALL, self.GetDefaultBorder())
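The docstring above spells out the contract for the imported factory: it is called with the dialog and the account, and the returned component is later asked for an info() dict via info_callbacks. A hypothetical factory satisfying that contract (module and widget names invented for illustration) might be:

import wx

class AccountDetailsPanel(wx.Panel):
    # Hypothetical extended-details panel for an account dialog.
    def __init__(self, dialog, account):
        wx.Panel.__init__(self, dialog)
        self.resource = wx.TextCtrl(self, value=getattr(account, 'resource', ''))

    def info(self):
        # Merged into the dialog's collected account info via info_callbacks.
        return {'resource': self.resource.Value}

def panel(dialog, account):
    # Referenced by a dotted path such as 'myplugins.jabber_gui.panel'.
    return AccountDetailsPanel(dialog, account)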
Example 5
    def __init__(self,
                 input_dims,
                 buffer_size,
                 hidden,
                 layers,
                 network_class,
                 polyak,
                 batch_size,
                 Q_lr,
                 pi_lr,
                 norm_eps,
                 norm_clip,
                 max_u,
                 action_l2,
                 clip_obs,
                 scope,
                 T,
                 rollout_batch_size,
                 subtract_goals,
                 relative_goals,
                 clip_pos_returns,
                 clip_return,
                 bc_loss,
                 q_filter,
                 num_demo,
                 sample_transitions,
                 gamma,
                 reuse=False,
                 **kwargs):
        """Implementation of DDPG that is used in combination with Hindsight Experience Replay (HER).

        Args:
            input_dims (dict of ints): dimensions for the observation (o), the goal (g), and the
                actions (u)
            buffer_size (int): number of transitions that are stored in the replay buffer
            hidden (int): number of units in the hidden layers
            layers (int): number of hidden layers
            network_class (str): the network class that should be used (e.g. 'baselines.her.ActorCritic')
            polyak (float): coefficient for Polyak-averaging of the target network
            batch_size (int): batch size for training
            Q_lr (float): learning rate for the Q (critic) network
            pi_lr (float): learning rate for the pi (actor) network
            norm_eps (float): a small value used in the normalizer to avoid numerical instabilities
            norm_clip (float): normalized inputs are clipped to be in [-norm_clip, norm_clip]
            max_u (float): maximum action magnitude, i.e. actions are in [-max_u, max_u]
            action_l2 (float): coefficient for L2 penalty on the actions
            clip_obs (float): clip observations before normalization to be in [-clip_obs, clip_obs]
            scope (str): the scope used for the TensorFlow graph
            T (int): the time horizon for rollouts
            rollout_batch_size (int): number of parallel rollouts per DDPG agent
            subtract_goals (function): function that subtracts goals from each other
            relative_goals (boolean): whether or not relative goals should be fed into the network
            clip_pos_returns (boolean): whether or not positive returns should be clipped
            clip_return (float): clip returns to be in [-clip_return, clip_return]
            sample_transitions (function): function that samples from the replay buffer
            gamma (float): gamma used for Q learning updates
            reuse (boolean): whether or not the networks should be reused
        """
        # The constructor arguments are assumed to have been stored as attributes
        # already (the original baselines implementation does this with a
        # @store_args decorator on __init__).
        if self.clip_return is None:
            self.clip_return = np.inf

        self.create_actor_critic = import_function(self.network_class)

        input_shapes = dims_to_shapes(self.input_dims)
        self.dimo = self.input_dims['o']
        self.dimg = self.input_dims['g']
        self.dimu = self.input_dims['u']

        self.demo_batch_size = 128
        self.lambda1 = 0.001
        self.lambda2 = 0.0078

        self.l2_reg_coeff = 0.005

        # Prepare staging area for feeding data to the model.
        stage_shapes = OrderedDict()
        for key in sorted(self.input_dims.keys()):
            if key.startswith('info_'):
                continue
            stage_shapes[key] = (None, *input_shapes[key])
        for key in ['o', 'g']:
            stage_shapes[key + '_2'] = stage_shapes[key]
        stage_shapes['r'] = (None, )
        self.stage_shapes = stage_shapes

        # Create network.
        with tf.variable_scope(self.scope):
            self.staging_tf = StagingArea(
                dtypes=[tf.float32 for _ in self.stage_shapes.keys()],
                shapes=list(self.stage_shapes.values()))
            self.buffer_ph_tf = [
                tf.placeholder(tf.float32, shape=shape)
                for shape in self.stage_shapes.values()
            ]
            self.stage_op = self.staging_tf.put(self.buffer_ph_tf)

            self._create_network(reuse=reuse)

        # Configure the replay buffer.
        buffer_shapes = {
            key: (self.T if key != 'o' else self.T + 1, *input_shapes[key])
            for key, val in input_shapes.items()
        }
        buffer_shapes['g'] = (buffer_shapes['g'][0], self.dimg)
        buffer_shapes['ag'] = (self.T + 1, self.dimg)

        buffer_size = (self.buffer_size //
                       self.rollout_batch_size) * self.rollout_batch_size
        self.buffer = ReplayBuffer(buffer_shapes, buffer_size, self.T,
                                   self.sample_transitions)

        global demoBuffer
        demoBuffer = ReplayBuffer(buffer_shapes, buffer_size, self.T,
                                  self.sample_transitions)
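To make the staging-shape bookkeeping above concrete, here is a small sketch (hypothetical dimensions, not from the source) of what stage_shapes ends up containing: one entry per input_dims key except the info_* entries, next-step copies o_2 and g_2, and a scalar reward r:

from collections import OrderedDict

# Hypothetical dimensions; real values come from the environment.
input_dims = {'o': 10, 'g': 3, 'u': 4, 'info_is_success': 1}
# Stand-in for dims_to_shapes: each positive dimension becomes a 1-D shape.
input_shapes = {key: (val,) for key, val in input_dims.items()}

stage_shapes = OrderedDict()
for key in sorted(input_dims.keys()):
    if key.startswith('info_'):
        continue                      # info_* keys are never staged
    stage_shapes[key] = (None, *input_shapes[key])
for key in ['o', 'g']:
    stage_shapes[key + '_2'] = stage_shapes[key]   # next-step observation/goal
stage_shapes['r'] = (None,)

print(stage_shapes)
# OrderedDict([('g', (None, 3)), ('o', (None, 10)), ('u', (None, 4)),
#              ('o_2', (None, 10)), ('g_2', (None, 3)), ('r', (None,))])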