Beispiel #1
0
def brick_to_process(brick, project):
    """
    Build a "fake process" from a brick database entry (document).

    The result is a direct :class:`~capsul.process.process.Process`
    instance (not a subclass): it cannot perform any actual processing,
    but it exposes the brick parameters as traits holding their recorded
    values, and carries a ``name``, ``uuid`` and ``exec_time`` taken from
    the brick.

    ``brick`` may be either a brick document or a document id (str); in
    the latter case the document is fetched from the project session.
    Returns None when no document is found.
    """
    if isinstance(brick, str):
        # a document id was passed: resolve it to the actual document
        brick = project.session.get_document(COLLECTION_BRICK, brick)
    if brick is None:
        return None

    process = Process()
    process.name = brick[BRICK_NAME].split('.')[-1]
    process.uuid = brick[BRICK_ID]
    process.exec_time = brick[BRICK_EXEC_TIME]

    # mirror every recorded parameter as an optional Any trait whose
    # value is the recorded one; inputs and outputs differ only in the
    # trait's 'output' flag
    for params, is_output in ((brick[BRICK_INPUTS], False),
                              (brick[BRICK_OUTPUTS], True)):
        for pname, pvalue in params.items():
            process.add_trait(
                pname, traits.Any(output=is_output, optional=True))
            setattr(process, pname, pvalue)

    return process
Beispiel #2
0
class PluginController(_traitsui.Controller):
    """Controller for the plugin UI; wires the tree selection and the
    edit pane to the underlying model."""
    # Fired when the plugin tree view must be refreshed.
    update_tree = _traits.Event()
    # Object currently selected in the tree view.
    selected_object = _traits.Any()
    # Model node currently shown in the editing pane.
    edit_node = _traits.Instance(Model)
    # Toolkit window handle of the plugin UI (set in init()).
    win_handle = _traits.Any()

    def init(self, info):
        """Initialise controller state when the traitsui view opens.

        Selects the model itself, exposes its calculator for editing and
        stores the window handle; returning True lets the UI open.
        """
        self.selected_object = self.model
        self.edit_node = self.model.calculator
        self.win_handle = info.ui.control
        return True
Beispiel #3
0
    def __init__(self, pipeline, name, is_output=True, input_type=None):
        """Create the node plugs and traits.

        'inputs' and 'exclude' are always input plugs; 'filtered' is an
        output plug when is_output is True, otherwise an extra input plug.
        Element type defaults to a permissive Any trait when input_type
        is not given.
        """
        input_names = ['inputs', 'exclude']
        output_names = []
        if is_output:
            output_names.append('filtered')
        else:
            input_names.append('filtered')
        in_traits = [{'name': n, 'optional': True} for n in input_names]
        out_traits = [{'name': n, 'optional': True} for n in output_names]
        super(ExcludeNode, self).__init__(pipeline, name, in_traits,
                                          out_traits)
        # fall back to an Any trait when no element type is imposed
        ptype = input_type if input_type else traits.Any(traits.Undefined)

        self.add_trait('inputs', traits.List(ptype, output=False))
        self.add_trait('exclude', ptype)
        self.add_trait('filtered', traits.List(ptype, output=is_output))

        self.set_callbacks()
Beispiel #4
0
    def __init__(self, pipeline, name, is_output=True, input_type=None):
        """Set up plugs and traits for cross-validation filtering.

        'inputs', 'fold' and 'nfolds' are always input plugs;
        'learn_list' and 'test_list' are output plugs when is_output is
        True, extra input plugs otherwise.
        """
        input_names = ['inputs', 'fold', 'nfolds']
        output_names = []
        if is_output:
            output_names.extend(['learn_list', 'test_list'])
        else:
            input_names.extend(['learn_list', 'test_list'])
        in_traits = [{'name': n, 'optional': True} for n in input_names]
        out_traits = [{'name': n, 'optional': True} for n in output_names]
        super(CVFilterNode, self).__init__(pipeline, name, in_traits,
                                           out_traits)
        # fall back to a permissive Any trait when no type is given
        ptype = input_type if input_type else traits.Any(traits.Undefined)

        self.add_trait('inputs', traits.List(ptype, output=False))
        self.add_trait('fold', traits.Int())
        self.add_trait('nfolds', traits.Int(10))
        self.add_trait('learn_list', traits.List(ptype, output=is_output))
        self.add_trait('test_list', traits.List(ptype, output=is_output))

        self.set_callbacks()
Beispiel #5
0
class _H5Trees(api.HasTraits):
    """Simple viewer binding an HDF5 file tree to a selected-node path."""
    # Root node wrapping one or more HDF5 files.
    h5_trees = api.Instance(Hdf5FilesNode)
    # Tree node currently selected in the tree editor.
    node = api.Any()
    # Path (within the HDF5 hierarchy) of the selected node.
    path = api.Str()

    # Tree on top, read-out of the selected path below.
    traits_view = ui.View(
        ui.Group(
            ui.Item(
                'h5_trees',
                editor=_hdf5_tree_editor(selected='node'),
                resizable=True,
            ),
            ui.Item('path', label='Selected node'),
            orientation='vertical',
        ),
        title='Multiple HDF5 file Tree Example',
        buttons=['OK', 'Cancel'],
        resizable=True,
        width=0.3,
        height=0.3,
    )

    def _node_changed(self):
        # Static trait-change handler: mirror the selection's path and log it.
        self.path = self.node.path
        print(self.node.path)
def createOption(name, initialValue):
    """Build an Option whose 'value' trait matches type(initialValue).

    bool/int/float map to the corresponding trait types; str becomes a
    File trait with a save-dialog editor; anything else falls back to a
    plain Any trait (with a warning).
    """
    option = Option(name=name)
    # exact-type dispatch: type(True) is bool (not int), so bool values
    # are routed correctly without relying on branch ordering
    simple_traits = {bool: traits.Bool, int: traits.Int, float: traits.Float}
    value_type = type(initialValue)
    if value_type in simple_traits:
        option.add_trait("value", simple_traits[value_type](initialValue))
    elif value_type is str:
        option.add_trait("value", traits.File(initialValue))
        # # need to modify the view, not sure how to make this more elegantly
        option.traits_view = traitsui.View(
            traitsui.HGroup(
                traitsui.Item("name",
                              style="readonly",
                              springy=True,
                              show_label=False),
                traitsui.Item(
                    "value",
                    show_label=False,
                    springy=True,
                    editor=traitsui.FileEditor(dialog_style='save'))))
    else:
        logger.warning(
            "unrecognised option type ({}) in processor. Using traits.Any Editor and value"
            .format(type(initialValue)))
        option.add_trait("value", traits.Any(initialValue))
    return option
Beispiel #7
0
 def add_parameters(self, param_types={}):
     """Declare traits for every concat-sequence plug plus the concat plug.

     Each trait takes its type from param_types (falling back to a
     permissive Any trait) and copies the output/optional flags from the
     corresponding plug.
     """
     for pname in self._concat_sequence + [self._concat_plug]:
         trait_type = param_types.get(pname)
         if trait_type is None:
             trait_type = traits.Any(traits.Undefined)
         self.add_trait(pname, trait_type)
         plug = self.plugs[pname]
         self.trait(pname).output = plug.output
         self.trait(pname).optional = plug.optional
Beispiel #8
0
 def build_node(cls, pipeline, name, conf_controller):
     """Create a StrConvNode whose input trait type is derived from the
     configuration controller's 'param_type' name."""
     kind = conf_controller.param_type
     if kind == 'Str':
         input_trait = traits.Str(traits.Undefined)
     elif kind == 'File':
         input_trait = traits.File(traits.Undefined)
     elif kind == 'Any':
         input_trait = traits.Any()
     elif kind not in (None, traits.Undefined):
         # any other name is resolved dynamically on the traits module
         input_trait = getattr(traits, kind)()
     else:
         # unset: let StrConvNode pick its own default type
         input_trait = None
     return StrConvNode(pipeline, name, input_type=input_trait)
class Option(traits.HasTraits):
    """A named option with a generic value and a simple name/value row view."""

    # Key from the options dictionary; describes the option.
    name = traits.String(
        desc="key from options dictionary. describes the option")
    # Generic placeholder; may be re-declared with a typed trait by callers.
    value = traits.Any()
    # Read-only name next to an editable value.
    traits_view = traitsui.View(
        traitsui.HGroup(
            traitsui.Item("name",
                          style="readonly",
                          springy=True,
                          show_label=False),
            traitsui.Item("value", show_label=False, springy=True)))
Beispiel #10
0
class PluginController(_traitsui.Controller):
    """Controller for the plugin UI; routes tree selections to an
    editable model controller."""
    # Fired to request a refresh of the plugin tree view.
    update_tree = _traits.Event()
    # Object currently selected in the tree view.
    selected_object = _traits.Any()
    # Controller for the node shown in the editing pane.
    edit_node = _traits.Instance(ModelController)
    # Toolkit window handle of the plugin UI (set in init()).
    win_handle = _traits.Any()

    def init(self, info):
        """Select the model and remember the window handle when the view
        is created; returning True lets the UI open."""
        self.selected_object = self.model
        self.win_handle = info.ui.control
        # self.edit_node = self.model.calculator
        return True

    @_traits.on_trait_change('selected_object')
    def _tree_selection_made(self, obj, name, new):
        # Route the tree selection to an editable controller: a
        # WindowLauncher delegates to its owning controller; anything
        # unrecognised falls back to the dummy placeholder controller.
        if isinstance(new, ModelController):
            self.edit_node = new
        elif isinstance(new, WindowLauncher):
            self.edit_node = new.owner_ref
        else:
            self.edit_node = self.dummy_model_controller

    @_traits.on_trait_change('update_tree')
    def _tree_update(self, info):
        # Debug hook: only logs that a tree refresh was requested.
        print("Tree update")
Beispiel #11
0
class Node(HasTraits):

    id = T.Trait(uuid.UUID)
    graph = T.Trait(Graph)
    executor = T.Trait(futures.ThreadPoolExecutor)
    timeout = T.Any()
    f = T.Function()
    name = T.String()
    result = T.Trait(futures.Future)

    def __init__(self,
                 (f, args, kws),
                 graph=None,
                 executor=None,
                 timeout=None,
                 type=None):
Beispiel #12
0
    def __init__(self,
                 pipeline,
                 name,
                 is_output=True,
                 input_type=None,
                 test_is_output=True,
                 has_index=True):
        """Build plugs and traits for a leave-one-out split node.

        'inputs' (and 'index' when has_index) are input plugs; 'train'
        and 'test' are output plugs or extra input plugs depending on
        is_output / test_is_output. Element type defaults to a
        permissive Any trait when input_type is not given.
        """
        self.has_index = has_index
        input_names = ['inputs']
        output_names = []
        if has_index:
            input_names.append('index')
        # route 'train' and 'test' to the input or output side
        (output_names if is_output else input_names).append('train')
        (output_names if test_is_output else input_names).append('test')
        in_traits = [{'name': n, 'optional': True} for n in input_names]
        out_traits = [{'name': n, 'optional': True} for n in output_names]
        super(LeaveOneOutNode, self).__init__(pipeline, name, in_traits,
                                              out_traits)
        ptype = input_type if input_type else traits.Any(traits.Undefined)

        self.add_trait('inputs', traits.List(ptype, output=False))
        if has_index:
            self.add_trait('index', traits.Int(0))
        self.add_trait('train', traits.List(ptype, output=is_output))
        self.add_trait('test', ptype)
        self.trait('test').output = test_is_output
        self.trait('train').inner_traits[0].output = is_output

        self.set_callbacks()
Beispiel #13
0
    def __init__(self, pipeline, name, input_type=None):
        """Build plugs and traits for a cross-validation fold node.

        'inputs', 'fold' and 'nfolds' come in; the 'train' and 'test'
        lists go out. Element type defaults to a permissive Any trait
        when input_type is not given.
        """
        in_traits = [{'name': n, 'optional': True}
                     for n in ('inputs', 'fold', 'nfolds')]
        out_traits = [{'name': n, 'optional': True}
                      for n in ('train', 'test')]
        super(CrossValidationFoldNode, self).__init__(pipeline, name,
                                                      in_traits, out_traits)
        ptype = input_type if input_type else traits.Any(traits.Undefined)

        self.add_trait('inputs', traits.List(ptype, output=False))
        self.add_trait('fold', traits.Int())
        self.add_trait('nfolds', traits.Int(10))
        is_output = True  # not a choice for now.
        self.add_trait('train', traits.List(ptype, output=is_output))
        self.add_trait('test', traits.List(ptype, output=is_output))

        self.set_callbacks()
Beispiel #14
0
    def __init__(self, pipeline, name, input_type=None):
        """Build a string-conversion node: one optional 'input' plug of
        the given (or Any) type, one optional 'output' Str plug."""
        in_traits = [{'name': 'input', 'optional': True}]
        out_traits = [{'name': 'output', 'optional': True}]
        super(StrConvNode, self).__init__(pipeline, name, in_traits,
                                          out_traits)
        ptype = input_type if input_type else traits.Any(traits.Undefined)

        self.add_trait('input', ptype)
        self.trait('input').output = False
        is_output = True  # not a choice for now.
        self.add_trait('output', traits.Str(output=is_output))
        # seed the input and propagate the initial conversion
        self.input = 0
        self.filter_callback('input', 0)

        self.set_callbacks()
Beispiel #15
0
class ModelController(_traitsui.Controller):
    '''MVController base class for stat analysis model.

    Holds the model identity and display name, keeps references to the
    plot windows it opens (so they stay alive) and to the toolkit window
    handle, and knows how to open result viewers (plots and dataset
    tables).
    '''
    # Identity is delegated to the wrapped model object.
    id = _traits.DelegatesTo('model')
    # Display name; subclasses must implement _name_default().
    name = _traits.Str()
    # UI references of opened plot windows, kept to prevent GC.
    plot_uis = _traits.List()
    # Toolkit window handle used as parent for child windows.
    win_handle = _traits.Any()

    # def init(self, info):
    #     super(ModelController, self).init(info)
    #     self.win_handle = info.ui.control

    def _name_default(self):
        """Subclasses must supply the default display name."""
        raise NotImplementedError('_name_default')

    def __eq__(self, other):
        # Controllers compare equal to anything matching their model id.
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 — confirm they are never used as dict
        # keys / set members.
        return self.id == other

    def __ne__(self, other):
        return self.id != other

    def get_result(self):
        """Return the analysis result object of the wrapped model."""
        return self.model.res

    def open_window(self, viewable, view_loop):
        """Open a viewer window for *viewable*.

        Expected viewable is by now:
          + Plot subtype
          + DataSet type

        Raises NotImplementedError for any other type.
        """
        if isinstance(viewable, PCScatterPlot):
            res = self.get_result()
            plot_control = PCPlotControl(viewable)

            win = SinglePlotWindow(
                plot=plot_control,
                res=res,
                view_loop=view_loop,
            )

            self._show_plot_window(win)
        elif isinstance(viewable, _chaco.DataView):
            res = self.get_result()
            plot_control = NoPlotControl(viewable)

            win = SinglePlotWindow(
                plot=plot_control,
                res=res,
                view_loop=view_loop,
            )

            self._show_plot_window(win)
        elif isinstance(viewable, DataSet):
            table = DSTableViewer(viewable)
            table.edit_traits(view=table.get_view(),
                              kind='live',
                              parent=self.win_handle)
        else:
            raise NotImplementedError("Do not know how to open this")

    def _show_plot_window(self, plot_window):
        """Open *plot_window* and keep its UI reference in plot_uis."""
        # FIXME: Setting parent forcing main ui to stay behind plot windows
        # plot_window.mother_ref = self
        # Bug fix: Python 3 reports sys.platform as 'linux' (Python 2
        # used 'linux2'), so compare by prefix to hit this branch on
        # both versions.
        if sys.platform.startswith('linux'):
            self.plot_uis.append(
                # plot_window.edit_traits(parent=self.win_handle, kind='live')
                plot_window.edit_traits(kind='live'))
        # elif sys.platform == 'win32':
        else:
            # FIXME: Investigate more here
            self.plot_uis.append(
                plot_window.edit_traits(parent=self.win_handle, kind='live')
                # plot_window.edit_traits(kind='live')
            )

    def _wind_title(self, res):
        """Window title derived from the result's method name."""
        mn = res.method_name
        return "{0} | Overview - ConsumerCheck".format(mn)
Beispiel #16
0
class GCS(t.HasTraits):
    """
    This is the ground control station GUI class. 
    
    For usage, for example if telemetry radio is on COM8 and the GCS Pixhawk is
    on COM7:
    >>> gcs = GCS()
    >>> gcs.setup_uav_link("COM8")
    >>> gcs.poll_uav()
    >>> gcs.setup_gcs_link("COM7")
    >>> gcs.poll_gcs()
    >>> gcs.configure_traits()
    >>> gcs.close()
    """
    # NOTE(review): this class uses Python 2 print statements and
    # dict .keys() passed to t.Enum; it will not run under Python 3
    # without porting.

    # Connections
    dialect = t.Str("gcs_pixhawk")
    show_errors = t.Bool(True)

    # ON GCS: Outgoing
    mission_message = t.Enum(set_mission_mode.keys(), label="Mission Type")
    sweep_angle = t.Float(label="Angle (degrees)")
    sweep_alt_start = t.Float(label="Start Altitude (m)")
    sweep_alt_end = t.Float(label="End Altitude (m)")
    sweep_alt_step = t.Float(label="Number of Altitude Steps")

    mavlink_message = t.Enum(mavlink_msgs, label="Mavlink Message")
    mavlink_message_filt = t.Enum(mavlink_msgs_filt, label="Mavlink Message")
    mavlink_message_params = t.Str(label="params")
    mavlink_message_args = t.Str(', '.join(
        mavlink_msgs_attr[mavlink_msgs_filt[0]]['args'][1:]),
                                 label="Arguments")

    # ON GCS: Incoming
    # Tether
    tether_length = t.Float(t.Undefined, label='Length (m)')
    tether_tension = t.Float(t.Undefined, label='Tension (N)')
    tether_velocity = t.Float(t.Undefined, label="Velocity")

    # ON GCS: Incoming
    # GCS Pixhawk
    gcs_eph = t.Float(t.Undefined)
    gcs_epv = t.Float(t.Undefined)
    gcs_satellites_visible = t.Int(t.Undefined)
    gcs_fix_type = t.Int(t.Undefined)

    gcs_airspeed = t.Float(t.Undefined)
    gcs_groundspeed = t.Float(t.Undefined)
    gcs_heading = t.Float(t.Undefined)
    gcs_velocity = t.Array(shape=(3, ))

    # Location inputs
    gcs_alt = t.Float(t.Undefined)
    gcs_lat = t.Float(t.Undefined)
    gcs_lon = t.Float(t.Undefined)

    # Attitude inputs
    gcs_pitch = t.Float(t.Undefined)
    gcs_roll = t.Float(t.Undefined)
    gcs_yaw = t.Float(t.Undefined)
    gcs_pitchspeed = t.Float(t.Undefined)
    gcs_yawspeed = t.Float(t.Undefined)
    gcs_rollspeed = t.Float(t.Undefined)

    # Battery Inputs
    gcs_current = t.Float(t.Undefined)
    gcs_level = t.Float(t.Undefined)
    gcs_voltage = t.Float(t.Undefined)

    # GCS connections
    gcs = t.Any(t.Undefined)
    gcs_polling = t.Bool(False)
    gcs_msg_thread = t.Instance(threading.Thread)
    gcs_error = t.Int(0)
    gcs_port = t.Str(t.Undefined)
    gcs_baud = t.Int(t.Undefined)

    # ON DRONE: Incoming
    # Mission Status
    # NOTE(review): this trait shadows the module-level 'mission_status'
    # mapping it is built from — confirm that is intentional.
    mission_status = t.Enum(mission_status.keys())

    # Probe
    probe_u = t.Float(t.Undefined, label="u (m/s)")
    probe_v = t.Float(t.Undefined, label="v (m/s)")
    probe_w = t.Float(t.Undefined, label="w (m/s)")

    # Vehicle inputs
    uav_modename = t.Str(t.Undefined)
    uav_armed = t.Bool(t.Undefined)
    uav_eph = t.Float(t.Undefined)
    uav_epv = t.Float(t.Undefined)
    uav_satellites_visible = t.Int(t.Undefined)
    uav_fix_type = t.Int(t.Undefined)

    uav_airspeed = t.Float(t.Undefined)
    uav_groundspeed = t.Float(t.Undefined)
    uav_heading = t.Float(t.Undefined)
    uav_velocity = t.Array(shape=(3, ))

    # Location inputs
    uav_alt = t.Float(t.Undefined)
    uav_lat = t.Float(t.Undefined)
    uav_lon = t.Float(t.Undefined)

    # Attitude inputs
    uav_pitch = t.Float(t.Undefined)
    uav_roll = t.Float(t.Undefined)
    uav_yaw = t.Float(t.Undefined)
    uav_pitchspeed = t.Float(t.Undefined)
    uav_yawspeed = t.Float(t.Undefined)
    uav_rollspeed = t.Float(t.Undefined)

    # Battery Inputs
    uav_current = t.Float(t.Undefined)
    uav_level = t.Float(t.Undefined)
    uav_voltage = t.Float(t.Undefined)

    # Vehicle Connections
    uav = t.Any(t.Undefined)
    uav_polling = t.Bool(False)
    uav_msg_thread = t.Instance(threading.Thread)
    uav_error = t.Int(0)
    uav_port = t.Str(t.Undefined)
    uav_baud = t.Int(t.Undefined)

    # NOTE(review): the following block re-declares the identical
    # GCS-connection traits already defined above; the later definitions
    # take precedence. Consider deleting one copy.
    # GCS connections
    gcs = t.Any(t.Undefined)
    gcs_polling = t.Bool(False)
    gcs_msg_thread = t.Instance(threading.Thread)
    gcs_error = t.Int(0)
    gcs_port = t.Str(t.Undefined)
    gcs_baud = t.Int(t.Undefined)

    # ui Buttons and display groups
    update_mission = t.Button("Update")
    send_mavlink_message = t.Button("Send")
    filtered = t.Bool(True)

    group_input = tui.Group(
        tui.Item(name="mission_status", enabled_when='False'),
        tui.Item(name="mission_message"),
        tui.Item(name="sweep_angle",
                 visible_when='mission_message=="SCHEDULE_SWEEP"'),
        tui.Item(name="sweep_alt_start",
                 visible_when='mission_message=="SCHEDULE_SWEEP"'),
        tui.Item(name="sweep_alt_end",
                 visible_when='mission_message=="SCHEDULE_SWEEP"'),
        tui.Item(name="sweep_alt_step",
                 visible_when='mission_message=="SCHEDULE_SWEEP"'),
        tui.Item(name="update_mission"),
        tui.Item("_"),
        tui.Item("filtered"),
        tui.Item("mavlink_message", visible_when='filtered==False'),
        tui.Item("mavlink_message_filt", visible_when='filtered'),
        tui.Item("mavlink_message_args",
                 enabled_when='False',
                 editor=tui.TextEditor(),
                 height=-40),
        tui.Item("mavlink_message_params"),
        tui.Item("send_mavlink_message"),
        tui.Item("_"),
        tui.Item(name="tether_tension", enabled_when='False'),
        tui.Item(name="tether_length", enabled_when='False'),
        tui.Item(name="tether_velocity", enabled_when='False'),
        tui.Item("_"),
        orientation="vertical",
        show_border=True,
        label="On GCS")
    group_uav = tui.Group(tui.Item(name="uav_modename", enabled_when='False'),
                          tui.Item(name="uav_airspeed", enabled_when='False'),
                          tui.Item(name="uav_groundspeed",
                                   enabled_when='False'),
                          tui.Item(name='uav_armed', enabled_when='False'),
                          tui.Item(name='uav_alt', enabled_when='False'),
                          tui.Item(name='uav_lat', enabled_when='False'),
                          tui.Item(name='uav_lon', enabled_when='False'),
                          tui.Item(name='uav_velocity', enabled_when='False'),
                          tui.Item(name='uav_pitch', enabled_when='False'),
                          tui.Item(name='uav_roll', enabled_when='False'),
                          tui.Item(name='uav_yaw', enabled_when='False'),
                          tui.Item(name='uav_current', enabled_when='False'),
                          tui.Item(name='uav_level', enabled_when='False'),
                          tui.Item(name='uav_voltage', enabled_when='False'),
                          tui.Item("_"),
                          tui.Item(name='probe_u', enabled_when='False'),
                          tui.Item(name='probe_v', enabled_when='False'),
                          tui.Item(name='probe_w', enabled_when='False'),
                          orientation='vertical',
                          show_border=True,
                          label="Incoming")
    group_gcs = tui.Group(tui.Item(name="gcs_airspeed", enabled_when='False'),
                          tui.Item(name="gcs_groundspeed",
                                   enabled_when='False'),
                          tui.Item(name='gcs_alt', enabled_when='False'),
                          tui.Item(name='gcs_lat', enabled_when='False'),
                          tui.Item(name='gcs_lon', enabled_when='False'),
                          tui.Item(name='gcs_velocity', enabled_when='False'),
                          tui.Item(name='gcs_pitch', enabled_when='False'),
                          tui.Item(name='gcs_roll', enabled_when='False'),
                          tui.Item(name='gcs_yaw', enabled_when='False'),
                          tui.Item(name='gcs_current', enabled_when='False'),
                          tui.Item(name='gcs_level', enabled_when='False'),
                          tui.Item(name='gcs_voltage', enabled_when='False'),
                          orientation='vertical',
                          show_border=True,
                          label="GCS")
    traits_view = tui.View(tui.Group(group_input,
                                     group_uav,
                                     group_gcs,
                                     orientation='horizontal'),
                           resizable=True)

    def _update_mission_fired(self):
        """ This will fire when the update_mission button is clicked
        
        In that case we send one of our custom MAVLINK messages, either
        set_mission_mode or schedule_sweep
        
        """
        mode = set_mission_mode[self.mission_message]
        # negative modes encode "schedule a sweep" rather than a plain
        # mission-mode change
        if mode >= 0:
            self.uav.mav.set_mission_mode_send(mode)
        else:
            self.uav.mav.schedule_sweep_send(self.sweep_angle,
                                             self.sweep_alt_start,
                                             self.sweep_alt_end,
                                             self.sweep_alt_step)

    def _mavlink_message_changed(self):
        """ This will fire when the dropdown is changed
        """
        # show the argument names (minus the leading one) for the
        # selected message
        self.mavlink_message_args = ', '.join(
            mavlink_msgs_attr[self.mavlink_message]['args'][1:])

    def _mavlink_message_filt_changed(self):
        """ This will fire when the filtered dropdown is changed
        """
        self.mavlink_message_args = ', '.join(
            mavlink_msgs_attr[self.mavlink_message_filt]['args'][1:])

    def _send_mavlink_message_fired(self):
        """ This will fire when the send_mavlink_message button is clicked
        
        In that case we pass on the mavlink message that the user is trying
        to send. 
        """
        # NOTE(review): uses self.mavlink_message even when the filtered
        # dropdown is active — confirm the unfiltered selection is the
        # intended source in both cases.
        func = mavlink_msgs_attr[self.mavlink_message]['name']
        args = [float(m) for m in self.mavlink_message_params.split(',')]
        getattr(self.uav.mav, func)(*args)

    def setup_uav_link(self, uav_port, uav_baud=56700):
        """
        This sets up the connection to the UAV. 
        
        Parameters
        -----------
        uav_port : str
            Serial port where UAV is connected (via telemetry radio)
        uav_baud: int, optional
            The baud rate. Default is 56700
        """
        mavutil.set_dialect(self.dialect)
        self.uav = mavutil.mavlink_connection(uav_port, uav_baud)
        self.uav_port = uav_port
        self.uav_baud = uav_baud

    def setup_gcs_link(self, gcs_port, gcs_baud=115200):
        """
        This sets up the connection to the GCS Pixhawk. 
        
        Parameters
        -----------
        uav_port : str
            Serial port where GCS Pixhawk is connected (via usb cable)
        uav_baud: int, optional
            The baud rate. Default is 115200
        """
        mavutil.set_dialect(self.dialect)
        self.gcs = mavutil.mavlink_connection(gcs_port, gcs_baud)
        self.gcs_port = gcs_port
        self.gcs_baud = gcs_baud

    def poll_uav(self):
        """
        This runs a new thread that listens for messages from the UAV and
        parses them for the GCS
        """
        self.uav_polling = True

        def worker():
            # Make sure we are connected
            m = self.uav
            m.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS,
                                 mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0,
                                 0)
            print("Waiting for heartbeat from %s" % m.address)
            self.uav.wait_heartbeat()
            print "Found Heardbeat, continuing"

            i = 0
            while self.uav_polling:
                #                print "uav_polling round", i
                i += 1
                try:
                    s = m.recv(16 * 1024)
                except Exception:
                    time.sleep(0.1)
                # prevent a dead serial port from causing the CPU to spin. The user hitting enter will
                # cause it to try and reconnect
                # NOTE(review): if recv() raised on the very first
                # iteration, 's' is unbound here and len(s) raises
                # NameError — was a 'continue' intended in the except?
                if len(s) == 0:
                    time.sleep(0.1)

                if 'windows' in platform.architecture()[-1].lower():
                    # strip nsh ansi codes
                    s = s.replace("\033[K", "")

                if m.first_byte:
                    m.auto_mavlink_version(s)
                msgs = m.mav.parse_buffer(s)
                if msgs:
                    for msg in msgs:
                        if getattr(m, '_timestamp', None) is None:
                            m.post_message(msg)
                        if msg.get_type() == "BAD_DATA":
                            if self.show_errors:
                                print "MAV error: %s" % msg
                            self.uav_error += 1
                        else:
                            self.parse_uav_msg(msg)
            print "uav_polling Stopped"
            self.uav_polling = False

        self.uav_msg_thread = threading.Thread(target=worker)
        self.uav_msg_thread.start()

    def poll_gcs(self):
        """
        This runs a new thread that listens for messages from the GCS Pixhawk
        and parses them for the GCS, it also forwards relevant messages to the
        UAV
        """
        self.gcs_polling = True

        def worker():
            # Make sure we are connected
            m = self.gcs
            m.mav.heartbeat_send(mavutil.mavlink.MAV_TYPE_GCS,
                                 mavutil.mavlink.MAV_AUTOPILOT_INVALID, 0, 0,
                                 0)
            print("Waiting for heartbeat from %s" % m.address)
            self.gcs.wait_heartbeat()
            print "Found Heardbeat, continuing"
            i = 0
            while self.gcs_polling:
                #                print "gcs_polling round", i
                i += 1
                try:
                    s = m.recv(16 * 1024)
                except Exception:
                    time.sleep(0.1)
                # prevent a dead serial port from causing the CPU to spin. The user hitting enter will
                # cause it to try and reconnect
                # NOTE(review): same possibly-unbound 's' issue as in
                # poll_uav's worker above.
                if len(s) == 0:
                    time.sleep(0.1)

                if 'windows' in platform.architecture()[-1].lower():
                    # strip nsh ansi codes
                    s = s.replace("\033[K", "")

                if m.first_byte:
                    m.auto_mavlink_version(s)
                msgs = m.mav.parse_buffer(s)
                if msgs:
                    for msg in msgs:
                        if getattr(m, '_timestamp', None) is None:
                            m.post_message(msg)
                        if msg.get_type() == "BAD_DATA":
                            if self.show_errors:
                                print "MAV error: %s" % msg
                            self.gcs_error += 1
                        else:
                            self.parsefwd_gcs_msg(msg)
            print "gcs_polling Stopped"
            self.gcs_polling = False

        self.gcs_msg_thread = threading.Thread(target=worker)
        self.gcs_msg_thread.start()

    def parse_uav_msg(self, m):
        """
        This parses a message received from the UAV and stores the values
        in the class attributes so that the GUI will update
        """
        #        print "Parsing Message"
        typ = m.get_type()
        if typ == 'GLOBAL_POSITION_INT':
            # lat/lon come in as 1e7-scaled ints, velocities in cm/s
            (self.uav_lat, self.uav_lon) = (m.lat / 1.0e7, m.lon / 1.0e7)
            self.uav_velocity = (m.vx / 100.0, m.vy / 100.0, m.vz / 100.0)
        elif typ == 'GPS_RAW':
            pass  # better to just use global position int
            # (self.lat, self.lon) = (m.lat, m.lon)
            # self.__on_change('location')
        elif typ == 'GPS_RAW_INT':
            # (self.lat, self.lon) = (m.lat / 1.0e7, m.lon / 1.0e7)
            self.uav_eph = m.eph
            self.uav_epv = m.epv
            self.uav_satellites_visible = m.satellites_visible
            self.uav_fix_type = m.fix_type
        elif typ == "VFR_HUD":
            self.uav_heading = m.heading
            self.uav_alt = m.alt
            self.uav_airspeed = m.airspeed
            self.uav_groundspeed = m.groundspeed
        elif typ == "ATTITUDE":
            self.uav_pitch = m.pitch
            self.uav_yaw = m.yaw
            self.uav_roll = m.roll
            self.uav_pitchspeed = m.pitchspeed
            self.uav_yawspeed = m.yawspeed
            self.uav_rollspeed = m.rollspeed
        elif typ == "SYS_STATUS":
            self.uav_voltage = m.voltage_battery
            self.uav_current = m.current_battery
            self.uav_level = m.battery_remaining
        elif typ == "HEARTBEAT":
            pass
#        print "Parsing Message DONE"

    def fwd_msg_to_uav(self, m):
        """This forwards messages from the GCS Pixhawk to the UAV if there is
        a UAV connected"""
        if self.uav is not t.Undefined:
            self.uav.write(m.get_msgbuf())

    def parsefwd_gcs_msg(self, m):
        """
        This parses a message received from the GCS Pixhawk, stores the values
        in the class attributes so that the GUI will update, and forwards 
        relevant messages to the UAV
        """
        #        print "Parsing Message"
        typ = m.get_type()
        if typ == 'GLOBAL_POSITION_INT':
            (self.gcs_lat, self.gcs_lon) = (m.lat / 1.0e7, m.lon / 1.0e7)
            self.gcs_velocity = (m.vx / 100.0, m.vy / 100.0, m.vz / 100.0)
            # Forward message
            self.fwd_msg_to_uav(m)
        elif typ == 'GPS_RAW':
            # better to just use global position int
            # (self.lat, self.lon) = (m.lat, m.lon)
            # self.__on_change('location')
            # Forward message
            self.fwd_msg_to_uav(m)
        elif typ == 'GPS_RAW_INT':
            # (self.lat, self.lon) = (m.lat / 1.0e7, m.lon / 1.0e7)
            self.gcs_eph = m.eph
            self.gcs_epv = m.epv
            self.gcs_satellites_visible = m.satellites_visible
            self.gcs_fix_type = m.fix_type
            # Forward message
            self.fwd_msg_to_uav(m)
        elif typ == "VFR_HUD":
            self.gcs_heading = m.heading
            self.gcs_alt = m.alt
            self.gcs_airspeed = m.airspeed
            self.gcs_groundspeed = m.groundspeed
            # Forward message
            self.fwd_msg_to_uav(m)
        elif typ == "ATTITUDE":
            self.gcs_pitch = m.pitch
            self.gcs_yaw = m.yaw
            self.gcs_roll = m.roll
            self.gcs_pitchspeed = m.pitchspeed
            self.gcs_yawspeed = m.yawspeed
            self.gcs_rollspeed = m.rollspeed
            # Forward message
            self.fwd_msg_to_uav(m)
        elif typ == "SYS_STATUS":
            self.gcs_voltage = m.voltage_battery
            self.gcs_current = m.current_battery
            self.gcs_level = m.battery_remaining
        elif typ == "HEARTBEAT":
            # Forward message
            self.fwd_msg_to_uav(m)


#        print "Parsing Message DONE"

    def close(self, *args, **kwargs):
        """
        This closes down the serial connections and stop the GUI polling
        """
        print 'Closing down connection'
        # best-effort shutdown: errors while stopping/closing either link
        # are deliberately ignored
        try:
            self.uav_polling = False
            self.uav.close()
        except:
            pass
        try:
            self.gcs_polling = False
            self.gcs.close()
        except:
            pass
Beispiel #17
0
class Matplotlibify(traits.HasTraits):
    logFilePlotReference = traits.Instance(
        logFilePlots.plotObjects.logFilePlot.LogFilePlot)
    plotPropertiesList = traits.List(PlotProperties)
    logFilePlot1 = traits.Any()
    logFilePlot2 = traits.Any()
    logFilePlotsReference = traits.Instance(
        logFilePlots.LogFilePlots)  #refernce to logFilePlots object
    isPriviliged = traits.Bool(False)
    hardCodeLegendBool = traits.Bool(
        False,
        desc=
        "click if you want to write your own legend otherwise it will generate legend based on series and legend replacement dict"
    )
    hardCodeLegendString = traits.String(
        "", desc="comma seperated string for each legend entry")
    #xLim = traits.Tuple()
    replacementStrings = {}
    savedPrintsDirectory = traits.Directory(
        os.path.join("\\\\ursa", "AQOGroupFolder", "Experiment Humphry",
                     "Data", "savedPrints"))
    showWaterMark = traits.Bool(True)

    matplotlibifyMode = traits.Enum("default", "dual plot")
    generatePlotScriptButton = traits.Button("generate plot")
    showPlotButton = traits.Button("show")
    #templatesFolder = os.path.join( os.path.expanduser('~'),"Google Drive","Thesis","python scripts","matplotlibify")
    templatesFolder = os.path.join("\\\\ursa", "AQOGroupFolder",
                                   "Experiment Humphry",
                                   "Experiment Control And Software",
                                   "LogFilePlots", "matplotlibify",
                                   "templates")
    templateFile = traits.File(
        os.path.join(templatesFolder, "matplotlibifyDefaultTemplate.py"))
    generatedScriptLocation = traits.File(
        os.path.join(os.path.expanduser('~'), "Google Drive", "Thesis",
                     "python scripts", "matplotlibify", "debug.py"))
    saveToOneNote = traits.Button("Save to OneNote")
    printButton = traits.Button("print")
    dualPlotMode = traits.Enum('sharedXY', 'sharedX', 'sharedY', 'stacked',
                               'stackedX', 'stackedY')
    logLibrarianReference = None

    secondPlotGroup = traitsui.VGroup(
        traitsui.Item("matplotlibifyMode", label="mode"),
        traitsui.HGroup(
            traitsui.Item("logFilePlot1",
                          visible_when="matplotlibifyMode=='dual plot'"),
            traitsui.Item("logFilePlot2",
                          visible_when="matplotlibifyMode=='dual plot'"),
            traitsui.Item('dualPlotMode',
                          visible_when="matplotlibifyMode=='dual plot'",
                          show_label=False)),
    )
    plotPropertiesGroup = traitsui.Item(
        "plotPropertiesList",
        editor=traitsui.ListEditor(style="custom"),
        show_label=False,
        resizable=True)

    generalGroup = traitsui.VGroup(
        traitsui.Item("showWaterMark", label="show watermark"),
        traitsui.HGroup(
            traitsui.Item("hardCodeLegendBool", label="hard code legend?"),
            traitsui.Item("hardCodeLegendString",
                          show_label=False,
                          visible_when="hardCodeLegendBool")),
        traitsui.Item("templateFile"),
        traitsui.Item("generatedScriptLocation", visible_when='isPriviliged'),
        traitsui.Item('generatePlotScriptButton', visible_when='isPriviliged'),
        traitsui.Item('showPlotButton'),
        traitsui.Item(
            'saveToOneNote', enabled_when='True'
        ),  # was deactivated for some time, probably there was an error, I try to debug this now
        traitsui.Item('printButton'))

    traits_view = traitsui.View(secondPlotGroup,
                                plotPropertiesGroup,
                                generalGroup,
                                resizable=True,
                                kind='live')

    def __init__(self, **traitsDict):
        super(Matplotlibify, self).__init__(**traitsDict)
        self.plotPropertiesList = [PlotProperties(self.logFilePlotReference)]
        self.generateReplacementStrings()
        self.add_trait(
            "logFilePlot1",
            traits.Trait(
                self.logFilePlotReference.logFilePlotsTabName, {
                    lfp.logFilePlotsTabName: lfp
                    for lfp in self.logFilePlotsReference.lfps
                }))
        self.add_trait(
            "logFilePlot2",
            traits.Trait(
                self.logFilePlotReference.logFilePlotsTabName, {
                    lfp.logFilePlotsTabName: lfp
                    for lfp in self.logFilePlotsReference.lfps
                }))

    def generateReplacementStrings(self):
        self.replacementStrings = {}
        if self.matplotlibifyMode == 'default':
            specific = self.plotPropertiesList[
                0].getReplacementStringsSpecific(identifier="")
            generic = self.getGlobalReplacementStrings()
            self.replacementStrings.update(specific)
            self.replacementStrings.update(generic)

        elif self.matplotlibifyMode == 'dual plot':
            specific1 = self.plotPropertiesList[
                0].getReplacementStringsSpecific(identifier="lfp1.")
            specific2 = self.plotPropertiesList[
                1].getReplacementStringsSpecific(identifier="lfp2.")
            generic = self.getGlobalReplacementStrings()
            self.replacementStrings.update(specific1)
            self.replacementStrings.update(specific2)
            self.replacementStrings.update(generic)

        for key in self.replacementStrings.keys(
        ):  #wrap strings in double quotes
            logger.info("%s = %s" % (self.replacementStrings[key],
                                     type(self.replacementStrings[key])))
            if isinstance(self.replacementStrings[key], (str, unicode)):
                if self.replacementStrings[key].startswith("def "):
                    continue  #if it is a function definition then dont wrap in quotes!
                else:
                    self.replacementStrings[key] = unicode(
                        self.wrapInQuotes(self.replacementStrings[key]))

    def getGlobalReplacementStrings(self, identifier=""):
        """generates the replacement strings that are specific to a log file plot """
        return {
            '{{%shardCodeLegendBool}}' % identifier: self.hardCodeLegendBool,
            '{{%shardCodeLegendString}}' % identifier:
            self.hardCodeLegendString,
            '{{%smatplotlibifyMode}}' % identifier: self.matplotlibifyMode,
            '{{%sshowWaterMark}}' % identifier: self.showWaterMark,
            '{{%sdualPlotMode}}' % identifier: self.dualPlotMode
        }

    def wrapInQuotes(self, string):
        return '"%s"' % string

    def _isPriviliged_default(self):
        if os.path.exists(
                os.path.join("C:", "Users", "tharrison", "Google Drive",
                             "Thesis", "python scripts", "matplotlibify")):
            return True
        else:
            return False

    def _generatedScriptLocation_default(self):
        root = os.path.join("C:", "Users", "tharrison", "Google Drive",
                            "Thesis", "python scripts", "matplotlibify")
        head, tail = os.path.split(self.logFilePlotReference.logFile)
        matplotlibifyName = os.path.splitext(tail)[0] + "-%s-vs-%s" % (
            self.plotPropertiesList[0]._yAxisLabel_default(),
            self.plotPropertiesList[0]._xAxisLabel_default())
        baseName = os.path.join(root, matplotlibifyName)
        filename = baseName + ".py"
        c = 0
        while os.path.exists(filename + ".py"):
            filename = baseName + "-%s.py" % c
            c += 1
        return filename

    def replace_all(self, text, replacementDictionary):
        for placeholder, new in replacementDictionary.iteritems():
            text = text.replace(placeholder, str(new))
        return text

    def _generatePlotScriptButton_fired(self):
        self.writePlotScriptToFile(self.generatedScriptLocation)

    def writePlotScriptToFile(self, path):
        """writes the script that generates the plot to the path """
        logger.info("attempting to generate matplotlib script...")
        self.generateReplacementStrings()
        with open(self.templateFile, "rb") as template:
            text = self.replace_all(template.read(), self.replacementStrings)
        with open(self.generatedScriptLocation, "wb") as output:
            output.write(text)
        logger.info("succesfully generated matplotlib script at location %s " %
                    self.generatedScriptLocation)

    def autoSavePlotWithMatplotlib(self, path):
        """runs the script with an appended plt.save() and plt.close("all")"""
        logger.info("attempting to save matplotlib plot...")
        self.generateReplacementStrings()
        with open(self.templateFile, "rb") as template:
            text = self.replace_all(template.read(), self.replacementStrings)
        ns = {}
        saveCode = "\n\nplt.savefig(r'%s', dpi=300)\nplt.close('all')" % path
        logger.info("executing save statement:%s" % saveCode)
        text += saveCode
        exec text in ns
        logger.info("exec completed succesfully...")

    def _showPlotButton_fired(self):
        logger.info("attempting to show matplotlib plot...")
        self.generateReplacementStrings()
        with open(self.templateFile, "rb") as template:
            text = self.replace_all(template.read(), self.replacementStrings)
        ns = {}
        exec text in ns
        logger.info("exec completed succesfully...")

    def _saveToOneNote_fired(self):
        """calls the lfp function to save the file in the log folder and then
        save it to oneNote. THis way there is no oneNote code in matplotlibify"""
        if self.logLibrarianReference is None:
            self.logFilePlotReference.savePlotAsImage(self)
        else:
            self.logFilePlotReference.savePlotAsImage(
                self, self.logLibrarianReference)

    def _matplotlibifyMode_changed(self):
        """change default template depending on whether or not this is a double axis plot """
        if self.matplotlibifyMode == "default":
            self.templateFile = os.path.join(
                self.templatesFolder, "matplotlibifyDefaultTemplate.py")
            self.plotPropertiesList = [
                PlotProperties(self.logFilePlotReference)
            ]
        elif self.matplotlibifyMode == "dual plot":
            self.templateFile = os.path.join(
                self.templatesFolder, "matplotlibifyDualPlotTemplate.py")
            if len(self.plotPropertiesList) > 1:
                self.plotPropertiesList[1] = PlotProperties(
                    self.logFilePlot2_)  #or should it be logFilePlot2_???
                logger.info("chanigng second element of plot properties list")
            elif len(self.plotPropertiesList) == 1:
                self.plotPropertiesList.append(
                    PlotProperties(self.logFilePlot2_))
                logger.info("appending to plot properties list")
            else:
                logger.error(
                    "there only be 1 or 2 elements in plot properties but found %s elements"
                    % len(self.plotPropertiesList))

    def _logFilePlot1_changed(self):
        """logFilePlot1 changed so update plotPropertiesList """
        logger.info("logFilePlot1 changed. updating plotPropertiesList")
        self.plotPropertiesList[0] = PlotProperties(self.logFilePlot1_)

    def _logFilePlot2_changed(self):
        logger.info("logFilePlot2 changed. updating plotPropertiesList")
        self.plotPropertiesList[1] = PlotProperties(self.logFilePlot2_)

    def dualPlotModeUpdates(self):
        """called when either _logFilePlot1 or _logFilePLot2 change """
        if (self.logFilePlot1_.xAxis == self.logFilePlot2_.xAxis
            ):  #Twin X 2 y axes mode
            if self.logFilePlot1_.yAxis == self.logFilePlot2_.yAxis:
                self.dualPlotMode = 'sharedXY'
            else:
                self.dualPlotMode = 'sharedX'
        elif self.logFilePlot1_.yAxis == self.logFilePlot2_.yAxis:
            self.dualPlotMode = 'sharedY'
        else:
            self.dualPlotMode = 'stacked'

    def _printButton_fired(self):
        """uses windows built in print image functionality to send png of plot to printer """
        logFolder, tail = os.path.split(self.logFilePlotReference.logFile)
        #logName = tail.strip(".csv")+" - "+str(self.selectedLFP.xAxis)+" vs "+str(self.selectedLFP.yAxis)
        imageFileName = os.path.join(logFolder, "temporary_print.png")
        self.logFilePlotReference.savePlotAsImage(self,
                                                  name=imageFileName,
                                                  oneNote=False)
        logger.info("attempting to use windows native printing dialog")
        os.startfile(os.path.normpath(imageFileName), "print")
        logger.info("saving to savedPrints folder")
        head, tail = os.path.split(self._generatedScriptLocation_default())
        tail = tail.replace(".py", ".png")
        dst = os.path.join(self.savedPrintsDirectory, tail)
        shutil.copyfile(os.path.normpath(imageFileName), dst)
        logger.info("saved to savedPrints folder")
Beispiel #18
0
class Node(HasTraits):
    """A node in the :class:`Graph` and associated state.

    :class:`Node`\ s can be composed using bitwise :meth:`~object.__and__` and
    :meth:`~object.__or__` operators to denote sequential or parallel
    evaluation order, respectively.

    For example, give ``A``, ``B``, and ``C`` functions that have been
    lifted to a :class:`Node` type (eg through the :class:`delayed`
    decorator ``@delayed()``), to evaluate ``A`` and ``B`` in parallel,
    then ``C``:

    .. code-block:: python

      G = (  (A(argA0, argA1) | B()) & C(argC)  ).graph

    will create the call :class:`Graph` ``G``.
    In order to evaluate ``G``:

    .. code-block:: python

      evaluate(G)

    """

    id = T.Trait(uuid.UUID)
    """(:class:`~uuid.UUID`) The node id. This is also the key to find the
    node in the :class:`Graph`.

    """

    graph = T.Trait(Graph)
    """(:class:`Graph`) The call :class:`Graph` in which the node is
    located"""

    executor = T.Trait(futures.Executor)
    """(:class:`~concurrent.futures.Executor`) The execution context for
    evaluating this node (see eg
    :class:`~concurrent.futures.ThreadPoolExecutor`)

    """

    timeout = T.Any()
    """(:class:`int`) How long to wait for execution to complete. See also
    :meth:`~concurrent.futures.Future.result`.

    """

    f = T.Function()
    """(:class:`~types.FunctionType`) The function to evaluate"""

    name = T.String()
    """(:class:`str`) Name of the function, usually short for
    ``self.f.func_name``

    """

    result = T.Trait(futures.Future)
    # The explicit link to `Future` is done because intersphince does
    # not find it
    """(concurrent.futures.Future_) The result and status of evaluation.

    .. _concurrent.futures.Future: https://pythonhosted.org/futures/index.html#future-objects

    """
    def __init__(self, f_args_kws, graph=None, executor=None, timeout=None):
        """Create a :class:`Node` to evaluate a function ``f`` in some
        ``graph`` using a given ``executor``

        :param func: f_args_kws = (f, args, kws) a 3-tuple of the
                                  function to evaluate (any callable)
                                  along with positional and keywork
                                  arguments.

        :param graph: The :class:`Graph` in which to insert the node
                      upon composition with others. A value of
                      ``None`` will create a new graph. When composed
                      with another node in a different
                      :func:`Node.graph` the two graphs with be
                      merged.

        :param executor: a :class:`futures.Executor` instance
        :param timeout: seconds (float or int) to wait.

        """
        self.id = nodeid()
        f, args, kws = f_args_kws
        self.f = f
        self._args = args
        self._kws = kws
        # NOTE: Python 2 attribute; under Python 3 this would be f.__name__.
        self.name = f.func_name
        self.graph = graph
        # Default to one worker thread per CPU when no executor is supplied.
        self.executor = executor or futures.ThreadPoolExecutor(cpu_count())
        self.timeout = timeout

    def _init_graph(self, graph=None):
        """Initialize the `graph` attribute

        Create a :class:`Graph` for this node if necessary.

        :param graph: the :class:`Graph` to use if ours is ``None``
        """
        # If we have no graph and none is offered, make a fresh one;
        # if one is offered, adopt it; if we already have one, keep it.
        if self.graph is None and graph is None:
            self.graph = Graph()

        elif self.graph is None and graph is not None:
            self.graph = graph

    def _merge_graph(self, other):
        """Combine this :class:`Node`'s graph with ``other``'s :class:`Graph`.

        .. node::
          This updates ``this.graph`` **in-place**

        :param other: another instance of :class:`Node`
        """

        if not self.graph == other.graph:
            # Copy every node and edge of other's graph into ours, then
            # alias other.graph to ours so both nodes share one graph.
            for s, t, data in other.graph.edges_iter(data=True):
                sn = other.graph.node[s]
                tn = other.graph.node[t]
                self.graph.add_node(s, sn)
                self.graph.add_node(t, tn)
                self.graph.add_edge(s, t, data)
            other.graph = self.graph

    @property
    def children_iter(self):
        """Generator of :class:`Node`\ s

        This ``yield``'s all the children :class:`Node`\ s of this node.

        :returns: Child nodes of this node.
        :rtype: generator of :class:`Node`
        """

        edges = set(self.graph.edges())
        for i in self.graph.successors_iter(self.id):
            child = self.graph.node[i]['node']
            # Sanity check: every successor must be joined by an edge.
            assert (self.id, i) in edges
            yield child

    @property
    def children(self):
        """[:class:`Node`]

        The children of this node.
        See :func:`Node.children_iter`

        :rtype: list of :class:`Node`
        """
        return list(self.children_iter)

    def start(self):
        """start() -> None

        Start evaluating this node

        Start evaluating this nodes function ``self.f`` if it hasn't
        already started.

        """

        # result is None until the first submission, so start() is idempotent.
        if self.result is None:
            self.result = self.executor.submit(self.f, *self._args,
                                               **self._kws)
        else:
            # already started
            pass

    def wait(self):
        """wait() -> None

        Wait for this node to finish evaluating

        This may timeout if :attr:`~Node.timeout` is specified.

        """
        self.result.result(self.timeout)

    def eval(self):
        """eval() -> None

        Start and wait for a node.

        """
        self.start()
        self.wait()

    def compose(self, other, MkOpNode):
        """compose(other, callable(graph=Graph)) -> OpNode

        Compose this :class:`Node` with another :class:`Node`.

        Two Nodes are composed using a proxy :class:`OpNode`.  The
        OpNode defines the evaluation semantics of its child nodes (eg
        sequantial or parallel).

        :param other: a :class:`Node`

        :param MkOpNode: a callable with keyword arg graph constructor
                         for the proxy node

        :returns: A new :class:`Node` with ``self`` and ``other`` and
                  children.

        :rtype: :class:`Node`

        """

        self._init_graph()
        other._init_graph(self.graph)
        self._merge_graph(other)

        assert self.graph is not None, self.name
        assert other.graph is not None, other.name
        assert self.graph == other.graph
        G = self.graph

        # print self.name, operator, other.name

        s, t = self.id, other.id
        # NOTE(review): this assignment is a no-op (t was just read from
        # other.id); kept verbatim from the original.
        other.id = t
        # The op node becomes the parent of both operands in the call graph.
        op = MkOpNode(graph=G)
        G.add_node(op.id, node=op, label=op.name)
        G.add_node(s, node=self, label=self.name)
        G.add_node(t, node=other, label=other.name)
        G.add_edge(op.id, s)
        G.add_edge(op.id, t)

        return op

    def __and__(self, other):
        """Sequential composition

        :param other: the :class:`Node` to evaluate **after** this
                      :class:`Node`

        :returns: the node composition (see :func:`Node.compose`)
        :rtype: :class:`Node`

        """

        return self.compose(other, AndNode)

    def __or__(self, other):
        """Parallel composition

        :param other: The :class:`Node to evaluate along with this
                      :class:`Node`.

        :returns: the node composition (see :func:`Node.compose`)
        :rtype: :class:`Node`

        """

        return self.compose(other, OrNode)
Beispiel #19
0
class Matplotlibify(traits.HasTraits):

    logFilePlotReference = traits.Instance(
        logFilePlot.LogFilePlot
    )  #gives access to most of the required attributes
    logFilePlotsReference = traits.Instance(
        logFilePlots.LogFilePlots)  #refernce to logFilePlots object
    xAxisLabel = traits.String("")
    yAxisLabel = traits.String("")
    xAxisLabel2 = traits.String("")  #used if in dual plot mode
    yAxisLabel2 = traits.String("")
    legendReplacements = traits.Dict(key_trait=traits.String,
                                     value_trait=traits.String)
    #xLim = traits.Tuple()
    replacementStrings = {}
    setXLimitsBool = traits.Bool(False)
    setYLimitsBool = traits.Bool(False)

    xMin = traits.Float
    xMax = traits.Float
    yMin = traits.Float
    yMax = traits.Float

    matplotlibifyMode = traits.Enum("default", "dual plot")
    logFilePlot1 = traits.Any(
    )  #will be mapped traits of name of log file plot to lfp reference
    logFilePlot2 = traits.Any(
    )  #will be mapped traits of name of log file plot to lfp reference

    generatePlotScriptButton = traits.Button("generate plot")
    showPlotButton = traits.Button("show")
    templatesFolder = os.path.join("C:", "Users", "tharrison", "Google Drive",
                                   "Thesis", "python scripts", "matplotlibify")
    templateFile = traits.File(
        os.path.join(templatesFolder, "matplotlibifyDefaultTemplate.py"))
    generatedScriptLocation = traits.File(
        os.path.join("C:", "Users", "tharrison", "Google Drive", "Thesis",
                     "python scripts", "matplotlibify", "debug.py"))

    secondPlotGroup = traitsui.VGroup(
        traitsui.Item("matplotlibifyMode", label="add second plot"),
        traitsui.HGroup(
            traitsui.Item("logFilePlot1",
                          visible_when="matplotlibifyMode=='dual plot'"),
            traitsui.Item("logFilePlot2",
                          visible_when="matplotlibifyMode=='dual plot'")))

    labelsGroup = traitsui.VGroup(
        traitsui.HGroup(traitsui.Item("xAxisLabel"),
                        traitsui.Item("yAxisLabel")),
        traitsui.HGroup(
            traitsui.Item("xAxisLabel2",
                          label="X axis label (2nd)",
                          visible_when="matplotlibifyMode=='dual plot'"),
            traitsui.Item("yAxisLabel2",
                          label="Y axis label (2nd)",
                          visible_when="matplotlibifyMode=='dual plot'")))

    limitsGroup = traitsui.VGroup(
        traitsui.Item("setXLimitsBool", label="set x limits?"),
        traitsui.Item("setYLimitsBool", label="set x limits?"),
        traitsui.HGroup(
            traitsui.Item("xMin", label="x min",
                          visible_when="setXLimitsBool"),
            traitsui.Item("xMax", label="x max",
                          visible_when="setXLimitsBool"),
            traitsui.Item("yMin", label="y min",
                          visible_when="setYLimitsBool"),
            traitsui.Item("yMax", label="y max",
                          visible_when="setYLimitsBool")))

    traits_view = traitsui.View(secondPlotGroup,
                                labelsGroup,
                                limitsGroup,
                                traitsui.Item("legendReplacements"),
                                traitsui.Item("templateFile"),
                                traitsui.Item("generatedScriptLocation"),
                                traitsui.Item('generatePlotScriptButton'),
                                traitsui.Item('showPlotButton'),
                                resizable=True)

    def __init__(self, **traitsDict):
        super(Matplotlibify, self).__init__(**traitsDict)
        self.generateReplacementStrings()
        self.add_trait(
            "logFilePlot1",
            traits.Trait(
                self.logFilePlotReference.logFilePlotsTabName, {
                    lfp.logFilePlotsTabName: lfp
                    for lfp in self.logFilePlotsReference.lfps
                }))
        self.add_trait(
            "logFilePlot2",
            traits.Trait(
                self.logFilePlotReference.logFilePlotsTabName, {
                    lfp.logFilePlotsTabName: lfp
                    for lfp in self.logFilePlotsReference.lfps
                }))

    def generateReplacementStrings(self):
        self.replacementStrings = {}

        if self.matplotlibifyMode == 'default':
            specific = self.getReplacementStringsFor(self.logFilePlotReference)
            generic = self.getGlobalReplacementStrings()
            self.replacementStrings.update(specific)
            self.replacementStrings.update(generic)

        elif self.matplotlibifyMode == 'dual plot':
            specific1 = self.getReplacementStringsFor(self.logFilePlot1_,
                                                      identifier="lfp1.")
            specific2 = self.getReplacementStringsFor(self.logFilePlot2_,
                                                      identifier="lfp2.")
            generic = self.getGlobalReplacementStrings()
            self.replacementStrings.update(specific1)
            self.replacementStrings.update(specific2)
            self.replacementStrings.update(generic)

        for key in self.replacementStrings.keys(
        ):  #wrap strings in double quotes
            logger.info("%s = %s" % (self.replacementStrings[key],
                                     type(self.replacementStrings[key])))
            if isinstance(self.replacementStrings[key], (str, unicode)):
                self.replacementStrings[key] = unicode(
                    self.wrapInQuotes(self.replacementStrings[key]))

    def getReplacementStringsFor(self, logFilePlot, identifier=""):
        """generates the replacement strings that are specific to a log file plot.
        indentifier is used inside key to make it unique to that lfp and should have the format
        {{lfp.mode}}. Identifier must include the . character"""
        return {
            '{{%smode}}' % identifier:
            logFilePlot.mode,
            '{{%slogFile}}' % identifier:
            logFilePlot.logFile,
            '{{%sxAxis}}' % identifier:
            logFilePlot.xAxis,
            '{{%syAxis}}' % identifier:
            logFilePlot.yAxis,
            '{{%saggregateAxis}}' % identifier:
            logFilePlot.aggregateAxis,
            '{{%sseries}}' % identifier:
            logFilePlot.series,
            '{{%sfiterYs}}' % identifier:
            logFilePlot.filterYs,
            '{{%sfilterMinYs}}' % identifier:
            logFilePlot.filterMinYs,
            '{{%sfilterMaxYs}}' % identifier:
            logFilePlot.filterMaxYs,
            '{{%sfilterXs}}' % identifier:
            logFilePlot.filterXs,
            '{{%sfilterMinXs}}' % identifier:
            logFilePlot.filterMinXs,
            '{{%sfilterMaxXs}}' % identifier:
            logFilePlot.filterMaxXs,
            '{{%sfilterNaN}}' % identifier:
            logFilePlot.filterNaN,
            '{{%sfilterSpecific}}' % identifier:
            logFilePlot.filterSpecific,
            '{{%sfilterSpecificString}}' % identifier:
            logFilePlot.filterSpecificString,
            '{{%sxLogScale}}' % identifier:
            logFilePlot.xLogScale,
            '{{%syLogScale}}' % identifier:
            logFilePlot.yLogScale,
            '{{%sinterpretAsTimeAxis}}' % identifier:
            logFilePlot.interpretAsTimeAxis
        }

    def getGlobalReplacementStrings(self, identifier=""):
        """generates the replacement strings that are specific to a log file plot """
        return {
            '{{%sxAxisLabel}}' % identifier: self.xAxisLabel,
            '{{%syAxisLabel}}' % identifier: self.yAxisLabel,
            '{{%sxAxisLabel2}}' % identifier: self.xAxisLabel2,
            '{{%syAxisLabel2}}' % identifier: self.yAxisLabel2,
            '{{%slegendReplacements}}' % identifier: self.legendReplacements,
            '{{%ssetXLimitsBool}}' % identifier: self.setXLimitsBool,
            '{{%ssetYLimitsBool}}' % identifier: self.setYLimitsBool,
            '{{%sxlimits}}' % identifier: (self.xMin, self.xMax),
            '{{%sylimits}}' % identifier: (self.yMin, self.yMax),
            '{{%smatplotlibifyMode}}' % identifier: self.matplotlibifyMode
        }

    def wrapInQuotes(self, string):
        return '"%s"' % string

    def _xAxisLabel_default(self):
        return self.logFilePlotReference.xAxis

    def _yAxisLabel_default(self):
        return self.logFilePlotReference.yAxis

    def _legendReplacements_default(self):
        return {_: _ for _ in self.logFilePlotReference.parseSeries()}

    def _xMin_default(self):
        return self.logFilePlotReference.firstPlot.x_axis.mapper.range.low

    def _xMax_default(self):
        return self.logFilePlotReference.firstPlot.x_axis.mapper.range.high

    def _yMin_default(self):
        return self.logFilePlotReference.firstPlot.y_axis.mapper.range.low

    def _yMax_default(self):
        return self.logFilePlotReference.firstPlot.y_axis.mapper.range.high

    def _generatedScriptLocation_default(self):
        root = os.path.join("C:", "Users", "tharrison", "Google Drive",
                            "Thesis", "python scripts", "matplotlibify")
        head, tail = os.path.split(self.logFilePlotReference.logFile)
        matplotlibifyName = os.path.splitext(tail)[0] + "-%s-vs-%s" % (
            self._yAxisLabel_default(), self._xAxisLabel_default())
        baseName = os.path.join(root, matplotlibifyName)
        filename = baseName + ".py"
        c = 0
        while os.path.exists(baseName + ".py"):
            filename = baseName + "-%s.py" % c
        return filename

    def replace_all(self, text, replacementDictionary):
        for placeholder, new in replacementDictionary.iteritems():
            text = text.replace(placeholder, str(new))
        return text

    def _generatePlotScriptButton_fired(self):
        logger.info("attempting to generate matplotlib script...")
        self.generateReplacementStrings()
        with open(self.templateFile, "rb") as template:
            text = self.replace_all(template.read(), self.replacementStrings)
        with open(self.generatedScriptLocation, "wb") as output:
            output.write(text)
        logger.info("succesfully generated matplotlib script at location %s " %
                    self.generatedScriptLocation)

    def _showPlotButton_fired(self):
        logger.info("attempting to show matplotlib plot...")
        self.generateReplacementStrings()
        with open(self.templateFile, "rb") as template:
            text = self.replace_all(template.read(), self.replacementStrings)
        ns = {}
        exec text in ns
        logger.info("exec completed succesfully...")

    def _matplotlibifyMode_changed(self):
        """change default template depending on whether or not this is a double axis plot """
        if self.matplotlibifyMode == "default":
            self.templateFile = os.path.join(
                self.templatesFolder, "matplotlibifyDefaultTemplate.py")
        elif self.matplotlibifyMode == "dual plot":
            self.templateFile = os.path.join(
                self.templatesFolder, "matplotlibifyDualPlotTemplate.py")
            self.xAxisLabel2 = self.logFilePlot2.xAxis
            self.yAxisLabel2 = self.logFilePlot2.yAxis
class Librarian(traits.HasTraits):
    """Librarian provides a way of writing useful information into the
    log folder for eagle logs. It is designed to make the information inside
    an eagle log easier to come back to. It mainly writes default strings into
    the comments file in the log folder and mirrors them to a OneNote page."""

    # Investigation (OneNote tab) names; populated from the notebook.
    sectionNames = traits.List(traits.Str)
    sectionName = traits.Enum(values="sectionNames", desc="Investigation this eagle Log is for. Corresponds to tab in OneNote")

    # User-chosen importance ranking, 1 (lowest) to 3, written into comments.
    importance = traits.Range(1, 3, 1)
    availableUsers = traits.List(traits.Str)  # loaded from config/librarian.json
    user = traits.Enum(values='availableUsers')
    keywords = traits.String('')
    writeToOneNoteButton = traits.Button("save")
    refreshInformation = traits.Button("refresh")
    # saveImage = traits.Button("save plot") # removed, because there is the same functionality as the button of the Matplotlibify dialog
    # NOTE(review): these class-level instances are shared between Librarian
    # objects -- presumably only one Librarian exists at a time; confirm.
    axisList = AxisSelector()
    purposeBlock = EntryBlock(fieldName="What is the purpose of this log?")
    resultsBlock = EntryBlock(fieldName="Explain what the data shows (important parameters that change, does it make sense etc.)?")
    commentsBlock = EntryBlock(fieldName="Anything Else?")
    # References to sibling GUI objects; traits.Any avoids import cycles.
    matplotlibifyReference = traits.Any()
    logFilePlotReference = traits.Any()  # gives access to most of the required attributes
    logFilePlotsReference = traits.Any()  # reference to logFilePlots object
#    notebooks = traits.Enum(values = "notebookNames") # we could let user select from a range of notebooks
#    notebookNames = traits.List
    notebookName = traits.String("Investigations")

    logName = traits.String("")
    xAxis = traits.String("")
    yAxis = traits.String("")

    traits_view = traitsui.View(
        traitsui.VGroup(
            traitsui.HGroup(traitsui.Item("sectionName",label="investigation"),traitsui.Item("user",label="user")),
            traitsui.Item("importance",label="importance",style='custom'),
            traitsui.Item("logName",show_label=False, style="readonly"),
            traitsui.Item("keywords",label='keywords'),
            traitsui.Item("axisList",show_label=False, editor=traitsui.InstanceEditor(),style='custom'),
            traitsui.Item("purposeBlock",show_label=False, editor=traitsui.InstanceEditor(),style='custom'),
            traitsui.Item("resultsBlock",show_label=False, editor=traitsui.InstanceEditor(),style='custom'),
            traitsui.Item("commentsBlock",show_label=False, editor=traitsui.InstanceEditor(),style='custom'),
            traitsui.Item("matplotlibifyReference",show_label=False, editor=traitsui.InstanceEditor(),style='custom'),
            traitsui.HGroup(
                # traitsui.Item("saveImage",show_label=False), # see above why commented
                traitsui.Item("writeToOneNoteButton",show_label=False),
                traitsui.Item("refreshInformation",show_label=False)
            ),
        )  , resizable=True  , kind ="live", title="Eagle OneNote", width=300, height=500
    )

    def __init__(self, **traitsDict):
        """Librarian object requires the log folder it is referring to. If a .csv
        file is given as logFolder argument it will use parent folder as the
        logFolder"""
        super(Librarian, self).__init__(**traitsDict)
        if os.path.isfile(self.logFolder):
            self.logFolder = os.path.split(self.logFolder)[0]
        else:
            logger.debug("found these in %s: %s" %(self.logFolder, os.listdir(self.logFolder) ))
        # load global settings (shared list of users)
        filename = os.path.join("config","librarian.json")
        with open(filename, 'r') as f:
            settings = json.load( f )
            self.availableUsers = settings["users"]
        # local import -- presumably to avoid a circular dependency; confirm.
        import matplotlibify.matplotlibify
        self.matplotlibifyReference = matplotlibify.matplotlibify.Matplotlibify(logFilePlotReference=self.logFilePlotReference,logFilePlotsReference=self.logFilePlotsReference, logLibrarianReference=self)
        self.matplotlibifyReference.templatesFolder = os.path.join("\\\\ursa","AQOGroupFolder","Experiment Humphry","Experiment Control And Software","LogFilePlots","matplotlibify","templates")
        self.matplotlibifyReference.templateFile = os.path.join(self.matplotlibifyReference.templatesFolder,"matplotlibifyDefaultTemplate.py")
        self.matplotlibifyReference.generatedScriptLocation = os.path.join(self.matplotlibifyReference.templatesFolder)
        self.logName = os.path.split(self.logFolder)[1]
        self.logFile = os.path.join(self.logFolder, os.path.split(self.logFolder)[1]+".csv")
        self.axisList.logFile = self.logFile#needs a copy so it can calculate valid values
        self.axisList.masterList = self.axisList._masterList_default()
        self.axisList.masterListWithNone = self.axisList._masterListWithNone_default()
        if self.xAxis != "":
            self.axisList.xAxis = self.xAxis
        if self.yAxis != "":
            self.axisList.yAxis = self.yAxis

        self.eagleOneNote = eagleLogsOneNote.EagleLogOneNote(notebookName = self.notebookName, sectionName = self.sectionName)
        self.sectionNames = self._sectionNames_default()
        self.setupLogPage()

        # load local (per-log-folder) settings, overriding the default section
        filename = os.path.join(self.logFolder,"logFilePlotSettings","librarian.json")
        if os.path.exists(filename):
            with open(filename, 'r') as f:
                settings = json.load( f )
                self.sectionName = settings["section"]

    def setupLogPage(self):
        """setup logPage object and change text boxes in GUI accordingly """
        # NOTE: EagleLogOneNote creation can fail for many reasons, e.g.
        # OneNote not installed or (most likely) an incorrect registry; see
        # the known bug and fix in the onenotepython module source.
        logPage = self.eagleOneNote.setPage(self.logName)
        if logPage is not None:#page exists
            self.purposeBlock.textBlock = self.eagleOneNote.getOutlineText("purpose")
            self.resultsBlock.textBlock = self.eagleOneNote.getOutlineText("results")
            self.commentsBlock.textBlock = self.eagleOneNote.getOutlineText("comments")
            xAxis,yAxis,series = self.eagleOneNote.getParametersOutlineValues()
            try:
                self.axisList.xAxis,self.axisList.yAxis,self.axisList.series = xAxis,yAxis,series
            except Exception as e:
                # str(e) rather than the Python-2-only e.message
                logger.error("error when trying to read analysis parameters: %s" % str(e))
            self.pageExists = True
        else:
            self.pageExists = False
            self.purposeBlock.textBlock = ""
            self.resultsBlock.textBlock = ""
            self.commentsBlock.textBlock = ""
            #could also reset axis list but it isn't really necessary

    def isUserInputGood(self):
        """runs a bunch of checks and provides a warning if user has written a bad eagle log.
        Returns True if the user has written a good eagle log and false if they havent"""
        issueStrings = []
        if self.axisList.series == "None":
            issueStrings.append("You have no Series selected, are there really no series for this log? If there are multiple series note this down in the Comments box.")
        if len(self.purposeBlock.textBlock)<200:
            issueStrings.append("The purpose text of your librarian is less than 200 characters. This is too short! Describe the measurement so that you could remember what you are doing in 2 years time.")
        if len(self.resultsBlock.textBlock)<200:
            issueStrings.append("The results text of your librarian is less than 200 characters. This is too short! Describe the results so that you can understand what was learnt in 2 years time. If this doesn't need to be long why do you need to save the log?")
        numberOfIssues = len(issueStrings)
        if numberOfIssues == 0:
            return True
        else:
            finalString = "There are issues with your Eagle Log\nPlease check the below points. If you are happy to continue press OK, otherwise press cancel and correct these issues.\n"
            # Bullet-separate the issues. The ``u`` unicode prefix had drifted
            # inside the quotes ("u\n\u2022"), which emitted a literal "u"
            # before the first newline instead of marking a unicode literal.
            finalString += u"\n\u2022".join(issueStrings)
            return traitsui.error(message=finalString)

    def _sectionNames_default(self):
        """returns the the list of section (tab) names in the oneNote """
        if hasattr(self, "eagleOneNote"):
            return self.eagleOneNote.getSectionNames()
        else:
            return ['General']

    def _sectionName_changed(self):
        """Switch the OneNote section, reload the page, but keep the user's
        currently-typed purpose/results/comments text."""
        purposeBlockTemp = self.purposeBlock
        resultsBlockTemp = self.resultsBlock
        commentsBlocktemp = self.commentsBlock
        if hasattr(self, "eagleOneNote"):
            self.eagleOneNote.sectionName=self.sectionName
            self.eagleOneNote.eagleLogPages = self.eagleOneNote.notebook.getPagesfromSectionName(self.sectionName)
            self.setupLogPage()
        self.purposeBlock = purposeBlockTemp
        self.resultsBlock = resultsBlockTemp
        self.commentsBlock = commentsBlocktemp

    def _writeToOneNoteButton_fired(self):
        """writes content of librarian to one note page """
        isGoodLibrarian = self.isUserInputGood()#False and they clicked cancel so just return
        if not isGoodLibrarian:
            return
        if not self.pageExists:
            self.eagleOneNote.createNewEagleLogPage(self.logName, refresh=True, setCurrent=True)
            self.pageExists = True
        self.eagleOneNote.setOutline("purpose", self.purposeBlock.textBlock,rewrite=False)
        self.eagleOneNote.setOutline("results", self.resultsBlock.textBlock,rewrite=False)
        automatic_comments = self._generate_automatic_additional_comments()
        self.eagleOneNote.setOutline("comments", automatic_comments+self.commentsBlock.textBlock,rewrite=False)
        self.eagleOneNote.setDataOutline(self.logName, rewrite=False)
        self.eagleOneNote.setParametersOutline(self.axisList.xAxis, self.axisList.yAxis, self.axisList.series, rewrite=False)
        self.eagleOneNote.currentPage.rewritePage()
        #now to get resizing done well we want to completely repull the XML and data
        #brute force method:
        self.eagleOneNote = eagleLogsOneNote.EagleLogOneNote(notebookName = self.notebookName, sectionName = self.sectionName)
        logPage = self.eagleOneNote.setPage(self.logName)#this sets current page of eagleOneNote
        self.eagleOneNote.organiseOutlineSizes()
        self.saveLocalSettings()


    def saveImageToFile(self,name=None,oneNote=True):
        """Save the current plot as a PNG, either to an automatic timestamped
        location in the log folder (name=None) or to *name*; optionally embed
        the image in the log's OneNote page."""
        # Split unconditionally: ``tail`` is also needed in the OneNote branch
        # below. Previously it was only bound in the automatic-name branch,
        # so oneNote=True with an explicit name raised a NameError.
        logFolder,tail = os.path.split(self.logFile)
        if name is None:#use automatic location
            logName =str(self.xAxis)+" vs "+str(self.yAxis)
            filename = datetime.datetime.today().strftime("%Y-%m-%dT%H%M%S")+"-"+logName+".png"
            imageFileName =os.path.join(logFolder, filename)
        else:
            imageFileName = name

        logger.info("saving image: %s" % imageFileName)
        self.matplotlibifyReference.autoSavePlotWithMatplotlib(imageFileName)

        if oneNote:
            try:
                logger.info("attempting to save image to oneNote page")
                logName =  tail.replace(".csv","")#this is what the name of the page should be in oneNote
                eagleOneNote = eagleLogsOneNote.EagleLogOneNote(notebookName =self.notebookName, sectionName =self.sectionName)
                logPage = eagleOneNote.setPage(logName)
                if logPage is None:
                    logger.info("saving image to one note . page doesn't exist. creating page")
                    eagleOneNote.createNewEagleLogPage(logName, refresh=True, setCurrent=True)
                logger.debug("attempting to save image")
                eagleOneNote.addImageToPlotOutline(imageFileName, (6.3*300,3.87*300),rewrite=True)
                # NOTE(review): fitLogFileBool / fitPlot /
                # logFilePlotFitterReference are not defined on Librarian in
                # this file -- presumably set externally; confirm, otherwise
                # this always lands in the except branch.
                if self.fitLogFileBool and self.fitPlot is not None and self.logFilePlotFitterReference.isFitted:
                    #these conditions mean that we have the fit data we can write to one note with the image!
                    logger.info("also writing fit data to one note")
                    eagleOneNote.setOutline("plots", eagleOneNote.getOutlineText("plots")+self.logFilePlotFitterReference.modelFitResult.fit_report())
                eagleOneNote.currentPage.rewritePage()
                #now to get resizing done well we want to completely repull the XML and data
                #brute force method:
                eagleOneNote = eagleLogsOneNote.EagleLogOneNote(notebookName = self.notebookName, sectionName = self.sectionName)
                logPage = eagleOneNote.setPage(logName)
                eagleOneNote.organiseOutlineSizes()

            except Exception as e:
                # str(e) rather than the Python-2-only e.message
                logger.error("failed to save the image to OneNote. error: %s " % str(e))

    def _saveImage_fired(self):
        """Delegate plot saving to the log file plot object."""
        self.logFilePlotReference.savePlotAsImage(self.matplotlibifyReference,self)


    def _generate_automatic_additional_comments(self):
        """generates some text that is always added to the beginning of additional comments """
        return "This log was written by %s. Importance ranking is **%s**.\nKeywords: %s" % (self.user, self.importance, self.keywords)

    def saveLocalSettings(self):
        """Persist per-log-folder settings (currently just the section name)."""
        filename = os.path.join(self.logFolder,"logFilePlotSettings","librarian.json")
        settings = {
            "section": self.sectionName
        }
        with open(filename, 'w') as f:
            json.dump( settings, f )
# ---- Beispiel #21 (scraped code-sample separator; the stray "0" was a vote count) ----
class EvalModel(t.HasStrictTraits):
    """Evaluate a trained model from a `Trainer` on a test DataLoader.

    Most configuration (model, data, device, loss) is delegated to the owning
    `trainer`. Typical flow: `run_test_set()` populates loss/targets/outputs,
    `calc_metrics()` computes sklearn metrics, and `calc_ward_metrics()`
    computes event-based Ward metrics.
    """

    trainer: Trainer = t.Any()
    model: mo.BaseNet = t.DelegatesTo("trainer")
    dl_test: DataLoader = t.DelegatesTo("trainer")
    data_spec: dict = t.DelegatesTo("trainer")
    cuda: bool = t.DelegatesTo("trainer")
    device: str = t.DelegatesTo("trainer")
    loss_func: str = t.DelegatesTo("trainer")
    model_path: str = t.DelegatesTo("trainer")
    has_null_class: bool = t.DelegatesTo("trainer")
    predict_null_class: bool = t.DelegatesTo("trainer")

    # 'prediction' mode employs overlap and reconstructs signal
    #   as a contiguous timeseries w/ optional windowing.
    #   It aims for best accuracy/f1 by using overlap, and will
    #   typically outperform 'training' mode.
    # 'training' mode does not average repeated points and does
    #   not window; it should produce acc/loss/f1 similar to
    #   training mode.
    run_mode: str = t.Enum(["prediction", "training"])
    window: str = t.Enum(["hanning", "boxcar"])
    eval_batch_size: int = t.Int(100)

    target_names: ty.List[str] = t.ListStr()

    def _target_names_default(self):
        """Class names from the data spec, optionally dropping the null class."""
        target_names = self.data_spec["output_spec"][0]["classes"]

        if self.has_null_class:
            # Convention: a null class, when present, is first and is named
            # "" or "Null".
            assert target_names[0] in ("", "Null")

            if not self.predict_null_class:
                target_names = target_names[1:]

        return target_names

    def _run_model_on_batch(self, data, targets):
        """Run one forward pass and compute the loss for a single batch.

        Returns (total_loss, first_output, first_targets, first_loss);
        only one model output head is assumed downstream.
        """
        targets = torch.stack(targets)

        if self.cuda:
            data, targets = data.cuda(), targets.cuda()

        output = self.model(data)

        _targets = self.model.transform_targets(targets, one_hot=False)
        if self.loss_func == "cross_entropy":
            _losses = [F.cross_entropy(o, t) for o, t in zip(output, _targets)]
            loss = sum(_losses)
        elif self.loss_func == "binary_cross_entropy":
            _targets_onehot = self.model.transform_targets(targets,
                                                           one_hot=True)
            _losses = [
                F.binary_cross_entropy_with_logits(o, t)
                for o, t in zip(output, _targets_onehot)
            ]
            loss = sum(_losses)
        else:
            # Report the unsupported loss name. (Was ``self.loss`` -- the
            # not-yet-computed Float result trait -- which made the error
            # message meaningless.)
            raise NotImplementedError(self.loss_func)

        # Assume only 1 output:

        return loss, output[0], _targets[0], _losses[0]

    def run_test_set(self, dl=None):
        """ Runs `self.model` on `self.dl_test` (or a provided dl) and stores results for subsequent evaluation. """
        if dl is None:
            dl = self.dl_test

        if self.cuda:
            self.model.cuda()
        self.model.eval()
        if self.eval_batch_size:
            # Re-batch for evaluation; order must be preserved for the
            # overlap reconstruction below, hence shuffle=False.
            dl = DataLoader(dl.dataset,
                            batch_size=self.eval_batch_size,
                            shuffle=False)
        #
        #     # Xc, yc = data.get_x_y_contig('test')
        X, *ys = dl.dataset.tensors
        # X: [N, input_chans, win_len]
        step = int(X.shape[2] / 2)
        # Sanity check: consecutive windows overlap by half a window.
        # NOTE(review): plain assert -- stripped under ``python -O``.
        assert torch.equal(X[0, :, step], X[1, :, 0])

        losses = []
        outputsraw = []
        outputs = []
        targets = []

        with Timer("run", log_output=False) as tr:
            with Timer("infer", log_output=False) as ti:
                for batch_idx, (data, *target) in enumerate(dl):
                    (
                        batch_loss,
                        batch_output,
                        batch_targets,
                        train_losses,
                    ) = self._run_model_on_batch(data, target)

                    losses.append(batch_loss.detach().cpu().item())
                    outputsraw.append(batch_output.detach().cpu().data.numpy())
                    outputs.append(
                        torch.argmax(batch_output, 1,
                                     False).detach().cpu().data.numpy())
                    targets.append(batch_targets.detach().cpu().data.numpy())
            self.infer_time_s_cpu = ti.interval_cpu
            self.infer_time_s_wall = ti.interval_wall

            self.loss = np.mean(losses)
            targets = np.concatenate(targets, axis=0)  # [N, out_win_len]
            outputsraw = np.concatenate(
                outputsraw, axis=0)  # [N, n_out_classes, out_win_len]
            outputs = np.concatenate(outputs,
                                     axis=0)  # [N, n_out_classes, out_win_len]

            # win_len = toutputsraw[0].shape[-1]
            if (self.model.output_type == "many_to_one_takelast"
                    or self.run_mode == "training"):
                # No overlap handling: just flatten windows end-to-end.
                self.targets = np.concatenate(targets, axis=-1)  # [N,]
                self.outputsraw = np.concatenate(
                    outputsraw, axis=-1)  # [n_out_classes, N,]
                self.outputs = np.concatenate(outputs, axis=-1)  # [N,]

            elif self.run_mode == "prediction":
                # Merge half-overlapping windows into one contiguous
                # timeseries, blending the two estimates of each point with
                # the chosen window function.
                n_segments, n_classes, out_win_len = outputsraw.shape

                output_step = int(out_win_len / 2)

                if self.window == "hanning":
                    EPS = 0.001  # prevents divide-by-zero
                    arr_window = (1 - EPS) * np.hanning(out_win_len) + EPS
                elif self.window == "boxcar":
                    arr_window = np.ones((out_win_len, ))
                else:
                    raise ValueError()

                # Allocate space for merged predictions
                if self.has_null_class and not self.predict_null_class:
                    outputsraw2 = np.zeros(
                        (n_segments + 1, n_classes - 1, output_step, 2))
                    window2 = np.zeros(
                        (n_segments + 1, n_classes - 1, output_step,
                         2))  # [N+1, out_win_len/2, 2]
                    # Drop the null-class channel (assumed first).
                    outputsraw = outputsraw[:, 1:, :]
                else:
                    outputsraw2 = np.zeros(
                        (n_segments + 1, n_classes, output_step, 2))
                    window2 = np.zeros((n_segments + 1, n_classes, output_step,
                                        2))  # [N+1, out_win_len/2, 2]

                # Drop in outputs/window vals in the two layers
                outputsraw2[:-1, :, :, 0] = outputsraw[:, :, :output_step]
                outputsraw2[1:, :, :,
                            1] = outputsraw[:, :, output_step:output_step * 2]
                window2[:-1, :, :, 0] = arr_window[:output_step]
                window2[1:, :, :, 1] = arr_window[output_step:output_step * 2]

                # Weighted average of the two overlapping estimates.
                merged_outputsraw = (outputsraw2 * window2).sum(
                    axis=3) / (window2).sum(axis=3)
                softmaxed_merged_outputsraw = softmax(merged_outputsraw,
                                                      axis=1)
                merged_outputs = np.argmax(softmaxed_merged_outputsraw, 1)

                self.outputsraw = np.concatenate(merged_outputsraw, axis=-1)
                self.outputs = np.concatenate(merged_outputs, axis=-1)
                self.targets = np.concatenate(
                    np.concatenate(
                        [
                            targets[:, :output_step],
                            targets[[-1], output_step:output_step * 2],
                        ],
                        axis=0,
                    ),
                    axis=-1,
                )
            else:
                raise ValueError()

        if self.has_null_class and not self.predict_null_class:
            # Discard null-class points and shift labels down by one.
            not_null_mask = self.targets > 0
            self.outputsraw = self.outputsraw[..., not_null_mask]
            self.outputs = self.outputs[not_null_mask]
            self.targets = self.targets[not_null_mask]
            self.targets -= 1

        self.n_samples_in = np.prod(dl.dataset.tensors[1].shape)
        self.n_samples_out = len(self.outputs)
        self.infer_samples_per_s = self.n_samples_in / self.infer_time_s_wall
        self.run_time_s_cpu = tr.interval_cpu
        self.run_time_s_wall = tr.interval_wall

    # --- Results populated by run_test_set() ---
    loss: float = t.Float()
    targets: np.ndarray = t.Array()
    outputsraw: np.ndarray = t.Array()
    outputs: np.ndarray = t.Array()
    n_samples_in: int = t.Int()
    n_samples_out: int = t.Int()
    infer_samples_per_s: float = t.Float()

    infer_time_s_cpu: float = t.Float()
    infer_time_s_wall: float = t.Float()
    run_time_s_cpu: float = t.Float()
    run_time_s_wall: float = t.Float()

    extra: dict = t.Dict({})

    # --- Metrics populated by calc_metrics() / calc_ward_metrics() ---
    acc: float = t.Float()
    f1: float = t.Float()
    f1_mean: float = t.Float()
    event_f1: float = t.Float()
    classification_report_txt: str = t.Str()
    classification_report_dict: dict = t.Dict()
    classification_report_df: pd.DataFrame = t.Property(
        t.Instance(pd.DataFrame))
    confusion_matrix: np.ndarray = t.Array()

    # Same metrics with the null class excluded ("nonull").
    nonull_acc: float = t.Float()
    nonull_f1: float = t.Float()
    nonull_f1_mean: float = t.Float()
    nonull_classification_report_txt: str = t.Str()
    nonull_classification_report_dict: dict = t.Dict()
    nonull_classification_report_df: pd.DataFrame = t.Property(
        t.Instance(pd.DataFrame))
    nonull_confusion_matrix: np.ndarray = t.Array()

    def calc_metrics(self):
        """Compute frame-wise accuracy/F1/report/confusion metrics from the
        stored targets/outputs, with and without the null class."""

        self.acc = sklearn.metrics.accuracy_score(self.targets, self.outputs)
        self.f1 = sklearn.metrics.f1_score(self.targets,
                                           self.outputs,
                                           average="weighted")
        self.f1_mean = sklearn.metrics.f1_score(self.targets,
                                                self.outputs,
                                                average="macro")

        self.classification_report_txt = sklearn.metrics.classification_report(
            self.targets,
            self.outputs,
            digits=3,
            labels=np.arange(len(self.target_names)),
            target_names=self.target_names,
        )
        self.classification_report_dict = sklearn.metrics.classification_report(
            self.targets,
            self.outputs,
            digits=3,
            output_dict=True,
            labels=np.arange(len(self.target_names)),
            target_names=self.target_names,
        )
        self.confusion_matrix = sklearn.metrics.confusion_matrix(
            self.targets, self.outputs)

        # Now, ignoring the null/none class:
        if self.has_null_class and self.predict_null_class:
            # assume null class comes first
            nonull_mask = self.targets > 0
            nonull_targets = self.targets[nonull_mask]
            # nonull_outputs = self.outputs[nonull_mask]
            # Re-argmax over non-null classes only, then shift back up by 1.
            nonull_outputs = self.outputsraw[1:, :].argmax(
                axis=0)[nonull_mask] + 1

            self.nonull_acc = sklearn.metrics.accuracy_score(
                nonull_targets, nonull_outputs)
            self.nonull_f1 = sklearn.metrics.f1_score(nonull_targets,
                                                      nonull_outputs,
                                                      average="weighted")
            self.nonull_f1_mean = sklearn.metrics.f1_score(nonull_targets,
                                                           nonull_outputs,
                                                           average="macro")
            self.nonull_classification_report_txt = sklearn.metrics.classification_report(
                nonull_targets,
                nonull_outputs,
                digits=3,
                labels=np.arange(len(self.target_names)),
                target_names=self.target_names,
            )
            self.nonull_classification_report_dict = sklearn.metrics.classification_report(
                nonull_targets,
                nonull_outputs,
                digits=3,
                output_dict=True,
                labels=np.arange(len(self.target_names)),
                target_names=self.target_names,
            )
            self.nonull_confusion_matrix = sklearn.metrics.confusion_matrix(
                nonull_targets, nonull_outputs)
        else:
            # Either no null class exists or it was already stripped in
            # run_test_set -- the plain metrics are the nonull metrics.
            self.nonull_acc = self.acc
            self.nonull_f1 = self.f1
            self.nonull_f1_mean = self.f1_mean
            self.nonull_classification_report_txt = self.classification_report_txt
            self.nonull_classification_report_dict = self.classification_report_dict
            self.nonull_confusion_matrix = self.confusion_matrix

    ward_metrics: WardMetrics = t.Instance(WardMetrics)

    def calc_ward_metrics(self):
        """ Do event-wise metrics, using the `wardmetrics` package which implements metrics from:

         [1]    J. A. Ward, P. Lukowicz, and H. W. Gellersen, "Performance metrics for activity recognition,"
                    ACM Trans. Intell. Syst. Technol., vol. 2, no. 1, pp. 1-23, Jan. 2011.
        """

        import wardmetrics

        # Must be in prediction mode -- otherwise, data is not contiguous, ward metrics will be bogus
        assert self.run_mode == "prediction"

        targets = self.targets
        predictions = self.outputs

        wmetrics = WardMetrics()

        targets_events = wardmetrics.frame_results_to_events(targets)
        preds_events = wardmetrics.frame_results_to_events(predictions)

        # Per-class segment and event metrics.
        for i, class_name in enumerate(self.target_names):
            class_wmetrics = ClassWardMetrics()

            t = targets_events.get(str(i), [])
            p = preds_events.get(str(i), [])
            # class_wmetrics['t'] = t
            # class_wmetrics['p'] = p

            try:
                assert len(t) and len(p)
                (
                    twoset_results,
                    segments_with_scores,
                    segment_counts,
                    normed_segment_counts,
                ) = wardmetrics.eval_segments(t, p)
                class_wmetrics.segment_twoset_results = twoset_results

                (
                    gt_event_scores,
                    det_event_scores,
                    detailed_scores,
                    standard_scores,
                ) = wardmetrics.eval_events(t, p)
                class_wmetrics.event_detailed_scores = detailed_scores
                class_wmetrics.event_standard_scores = standard_scores
            except (AssertionError, ZeroDivisionError):
                # Empty results or targets for this class: record empty
                # metrics rather than aborting the whole evaluation.
                class_wmetrics.segment_twoset_results = {}
                class_wmetrics.event_detailed_scores = {}
                class_wmetrics.event_standard_scores = {}

            wmetrics.class_ward_metrics.append(class_wmetrics)

        # Combined (all non-null classes) metrics: concatenate each class's
        # events onto a shared timeline, offsetting so classes never overlap.
        tt = []
        pp = []
        for i, class_name in enumerate(self.target_names):
            # skip null class for combined eventing:
            if class_name in ("", "Null"):
                continue

            if len(tt) or len(pp):
                offset = np.max(tt + pp) + 2
            else:
                offset = 0
            # (A stray no-op list comprehension that re-offset the *previous*
            # iteration's events and discarded the result was removed here.)

            t = targets_events.get(str(i), [])
            p = preds_events.get(str(i), [])

            tt += [(a + offset, b + offset) for (a, b) in t]
            pp += [(a + offset, b + offset) for (a, b) in p]

        t = tt
        p = pp

        class_wmetrics = ClassWardMetrics()
        assert len(t) and len(p)
        (
            twoset_results,
            segments_with_scores,
            segment_counts,
            normed_segment_counts,
        ) = wardmetrics.eval_segments(t, p)
        class_wmetrics.segment_twoset_results = twoset_results

        (
            gt_event_scores,
            det_event_scores,
            detailed_scores,
            standard_scores,
        ) = wardmetrics.eval_events(t, p)
        class_wmetrics.event_detailed_scores = detailed_scores
        class_wmetrics.event_standard_scores = standard_scores

        # Reformat as dataframe for easier calculations
        df = pd.DataFrame(
            [cm.event_standard_scores for cm in wmetrics.class_ward_metrics],
            index=self.target_names,
        )
        df.loc["all_nonull"] = class_wmetrics.event_standard_scores

        # Calculate F1's to summarize recall/precision for each class
        df["f1"] = (2 * (df["precision"] * df["recall"]) /
                    (df["precision"] + df["recall"]))
        df["f1 (weighted)"] = (
            2 * (df["precision (weighted)"] * df["recall (weighted)"]) /
            (df["precision (weighted)"] + df["recall (weighted)"]))

        # Load dataframes into dictionary output
        wmetrics.df_event_scores = df
        wmetrics.df_event_detailed_scores = pd.DataFrame(
            [cm.event_detailed_scores for cm in wmetrics.class_ward_metrics],
            index=self.target_names,
        )
        wmetrics.df_segment_2set_results = pd.DataFrame(
            [cm.segment_twoset_results for cm in wmetrics.class_ward_metrics],
            index=self.target_names,
        )
        wmetrics.overall_ward_metrics = class_wmetrics

        self.ward_metrics = wmetrics
        self.event_f1 = self.ward_metrics.df_event_scores.loc["all_nonull",
                                                              "f1"]

    def _get_classification_report_df(self):
        """Property getter: classification report as a DataFrame, augmented
        with the event-F1 column when ward metrics are available."""
        df = pd.DataFrame(self.classification_report_dict).T

        # Include Ward-metrics-derived "Event F1 (unweighted by length)"
        if self.ward_metrics:
            df["event_f1"] = self.ward_metrics.df_event_scores["f1"]
        else:
            df["event_f1"] = np.nan

        # Calculate various summary averages; the last 3 rows are the
        # accuracy / macro avg / weighted avg summary rows, hence iloc[:-3].
        df.loc["macro avg", "event_f1"] = df["event_f1"].iloc[:-3].mean()
        df.loc["weighted avg", "event_f1"] = (
            df["event_f1"].iloc[:-3] *
            df["support"].iloc[:-3]).sum() / df["support"].iloc[:-3].sum()

        df["support"] = df["support"].astype(int)

        return df

    def _get_nonull_classification_report_df(self):
        """Property getter: nonull classification report as a DataFrame, or
        None when there is no null class to exclude."""
        target_names = self.target_names
        if not (target_names[0] in ("", "Null")):
            return None

        df = pd.DataFrame(self.nonull_classification_report_dict).T

        df["support"] = df["support"].astype(int)

        return df

    def _save(self, checkpoint_dir=None):
        """ Saves/checkpoints model state and training state to disk. """
        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        os.makedirs(checkpoint_dir, exist_ok=True)

        # save model params
        evalmodel_path = os.path.join(checkpoint_dir, "evalmodel.pth")

        with open(evalmodel_path, "wb") as f:
            pickle.dump(self, f)

        return checkpoint_dir

    def _restore(self, checkpoint_dir=None):
        """ Restores model state and training state from disk. """

        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        evalmodel_path = os.path.join(checkpoint_dir, "evalmodel.pth")

        # Reconstitute old trainer and copy state to this trainer.
        # NOTE(review): pickle.load on a checkpoint is only safe for files we
        # wrote ourselves -- never point this at untrusted data.
        with open(evalmodel_path, "rb") as f:
            other_evalmodel = pickle.load(f)

        self.__setstate__(other_evalmodel.__getstate__())

        self.trainer._restore(checkpoint_dir)
# ---- Beispiel #22 (scraped code-sample separator; the stray "0" was a vote count) ----
class Trainer(t.HasStrictTraits):
    """Training harness for FilterNet-style sequence models.

    Owns the model, data loaders, optimizer, and LR scheduler — all created
    lazily via traits `_x_default` initializers — and implements the
    train / eval / early-stop / checkpoint loop. Traits marked
    ``transient=True`` are excluded from pickling and rebuilt on restore.
    """

    # The model under training; rebuilt from config on restore (transient).
    model: models.BaseNet = t.Instance(torch.nn.Module, transient=True)

    def _model_default(self):
        """Instantiate the model from base_config / model_config / data_spec."""
        # Merge 'base config' (if requested) and any overrides in 'model_config'
        if self.base_config:
            model_config = get_ref_arch(self.base_config)
        else:
            model_config = {}
        if self.model_config:
            model_config.update(self.model_config)
        if self.data_spec:
            model_config.update(
                {
                    "input_channels": self.data_spec["input_channels"],
                    "num_output_classes": [
                        s["num_classes"] for s in self.data_spec["output_spec"]
                    ],
                }
            )
        # create model accordingly
        model_class = getattr(models, self.model_class)
        return model_class(**model_config)

    # Model/config selection
    base_config: str = t.Str()
    model_config: dict = t.Dict()
    model_class: str = t.Enum("FilterNet", "DeepConvLSTM")

    # Optimization hyper-parameters
    lr_exp: float = t.Float(-3.0)  # learning rate = 10 ** lr_exp
    batch_size: int = t.Int()
    win_len: int = t.Int(512)
    n_samples_per_batch: int = t.Int(5000)
    train_step: int = t.Int(16)
    seed: int = t.Int()
    decimation: int = t.Int(1)
    # BUG FIX: was t.Enum(["Adam", "SGD, RMSprop"]) — the second entry was a
    # single malformed string, so "SGD" and "RMSprop" could never be selected
    # even though _optimizer_default supports both.
    optim_type: str = t.Enum(["Adam", "SGD", "RMSprop"])
    loss_func: str = t.Enum(["cross_entropy", "binary_cross_entropy"])
    patience: int = t.Int(10)
    lr_decay: float = t.Float(0.95)
    weight_decay: float = t.Float(1e-4)
    alpha: float = t.Float(0.99)      # RMSprop smoothing constant
    momentum: float = t.Float(0.25)   # SGD / RMSprop momentum
    validation_fold: int = t.Int()
    epoch_size: float = t.Float(2.0)  # multiplier on nominal epoch length
    y_cols: str = t.Str()
    sensor_subset: str = t.Str()

    has_null_class: bool = t.Bool()

    def _has_null_class_default(self):
        # Class 0 named "" or "Null" marks a null/background class.
        return self.data_spec["output_spec"][0]["classes"][0] in ("", "Null")

    predict_null_class: bool = t.Bool(True)

    _class_weights: torch.Tensor = t.Instance(torch.Tensor)

    def __class_weights_default(self):
        # Not weights for now because didn't seem to increase things significantly and
        #   added yet another hyper-parameter. Using zero didn't seem to work well.
        # (Deliberately disabled via the `False and ...` guard below.)
        if False and self.has_null_class and not self.predict_null_class:
            cw = torch.ones(self.model.num_output_classes, device=self.device)
            cw[0] = 0.01
            cw /= cw.sum()
            return cw
        return None

    # Dataset selection and run naming
    dataset: str = t.Enum(
        ["opportunity", "smartphone_hapt", "har", "intention_recognition"]
    )
    name: str = t.Str()

    def _name_default(self):
        """Default run name: <ModelClass>_<timestamp>."""
        import time

        modelstr = self.model.__class__.__name__
        timestr = time.strftime("%Y%m%d-%H%M%S")
        return f"{modelstr}_{timestr}"

    model_path: str = t.Str()

    def _model_path_default(self):
        return f"saved_models/{self.name}/"

    data_spec: dict = t.Any()
    epoch_iters: int = t.Int(0)  # samples per nominal epoch (set from train split)
    train_state: TrainState = t.Instance(TrainState, ())
    cp_iter: int = t.Int()  # epoch index of the most recent checkpoint

    cuda: bool = t.Bool(transient=True)

    def _cuda_default(self):
        return torch.cuda.is_available()

    device: str = t.Str(transient=True)

    def _device_default(self):
        return "cuda" if self.cuda else "cpu"

    # Data loaders (transient; rebuilt lazily after restore)
    dl_train: DataLoader = t.Instance(DataLoader, transient=True)

    def _dl_train_default(self):
        return self._get_dl("train")

    dl_val: DataLoader = t.Instance(DataLoader, transient=True)

    def _dl_val_default(self):
        return self._get_dl("val")

    dl_test: DataLoader = t.Instance(DataLoader, transient=True)

    def _dl_test_default(self):
        return self._get_dl("test")

    def _get_dl(self, s):
        """Build the DataLoader for split `s` ("train", "val", or "test")."""
        if self.dataset == "opportunity":
            from filternet.datasets.opportunity import get_x_y_contig
        elif self.dataset == "smartphone_hapt":
            from filternet.datasets.smartphone_hapt import get_x_y_contig
        elif self.dataset == "har":
            from filternet.datasets.har import get_x_y_contig
        elif self.dataset == "intention_recognition":
            from filternet.datasets.intention_recognition import get_x_y_contig
        else:
            raise ValueError(f"Unknown dataset {self.dataset}")

        kwargs = {}
        if self.y_cols:
            kwargs["y_cols"] = self.y_cols
        if self.sensor_subset:
            kwargs["sensor_subset"] = self.sensor_subset

        Xc, ycs, data_spec = get_x_y_contig(s, **kwargs)

        if s == "train":
            # Training shuffles (at the DataLoader level), and we set epoch size
            # to length of the dataset. We can set train_step as small as we want
            # to get more windows; we'll only run len(Xc)/win_len of them in each
            # training epoch.
            self.epoch_iters = int(len(Xc) / self.decimation)
            X, ys = sliding_window_x_y(
                Xc, ycs, win_len=self.win_len, step=self.train_step, shuffle=False
            )
            # Set the overall data spec using the training set,
            #  and modify later if more info is needed.
            self.data_spec = data_spec
        else:
            # Val and test data are not shuffled.
            # Each point is inferred ~twice b/c step = win_len/2
            X, ys = sliding_window_x_y(
                Xc,
                ycs,
                win_len=self.win_len,
                step=int(self.win_len / 2),
                shuffle=False,  # Cannot be true with windows
            )

        dl = DataLoader(
            TensorDataset(torch.Tensor(X), *[torch.Tensor(y).long() for y in ys]),
            batch_size=self.batch_size,
            shuffle=True if s == "train" else False,
        )
        return dl

    def _batch_size_default(self):
        # Keep total samples per batch roughly constant regardless of win_len.
        batch_size = int(self.n_samples_per_batch / self.win_len)
        print(f"Batch size: {batch_size}")
        return batch_size

    optimizer = t.Any(transient=True)

    def _optimizer_default(self):
        """Build the optimizer selected by `optim_type` (lr = 10 ** lr_exp)."""
        if self.optim_type == "SGD":
            optimizer = torch.optim.SGD(
                self.model.parameters(),
                lr=10 ** (self.lr_exp),
                momentum=self.momentum,
                weight_decay=self.weight_decay,
            )
        elif self.optim_type == "Adam":
            optimizer = torch.optim.Adam(
                self.model.parameters(),
                lr=10 ** (self.lr_exp),
                weight_decay=self.weight_decay,
                amsgrad=True,
            )
        elif self.optim_type == "RMSprop":
            optimizer = torch.optim.RMSprop(
                self.model.parameters(),
                lr=10 ** (self.lr_exp),
                alpha=self.alpha,
                weight_decay=self.weight_decay,
                momentum=self.momentum,
            )
        else:
            raise NotImplementedError(self.optim_type)
        return optimizer

    # 1-based count of epochs recorded so far (derived, not stored).
    iteration: int = t.Property(t.Int)

    def _get_iteration(self):
        return len(self.train_state.epoch_records) + 1

    lr_scheduler = t.Any(transient=True)

    def _lr_scheduler_default(self):
        lr_scheduler = torch.optim.lr_scheduler.ExponentialLR(
            self.optimizer, self.lr_decay  # , last_epoch=self._iteration
        )

        # If this is being re-instantiated in mid-training, then we must
        #  iterate scheduler forward to match the training step.
        if self.lr_decay != 1:
            for _ in range(self.iteration):
                lr_scheduler.step()

        return lr_scheduler

    #####
    # Training Methods
    ##
    def _train_batch(self, data, targets):
        """One optimization step; returns (loss, first output, first targets)."""
        self.optimizer.zero_grad()
        loss, output, _targets, _ = self._run_model_on_batch(data, targets)
        loss.backward()
        self.optimizer.step()
        # if self.max_lr:
        #     self.lr_scheduler.step()

        return loss, output, _targets

    def _run_model_on_batch(self, data, targets):
        """Forward one batch and compute the loss.

        Returns (total loss, first output head, first transformed targets,
        first per-head loss) — downstream code assumes only one output head.
        """
        targets = torch.stack(targets)

        if self.cuda:
            data, targets = data.cuda(), targets.cuda()

        output = self.model(data)

        _targets = self.model.transform_targets(targets, one_hot=False)
        if self.loss_func == "cross_entropy":
            # Note: loop vars renamed from (o, t) to avoid shadowing the
            # module-level traits alias `t`.
            _losses = [
                F.cross_entropy(out_i, tgt_i, weight=self._class_weights)
                for out_i, tgt_i in zip(output, _targets)
            ]
            loss = sum(_losses)
        elif self.loss_func == "binary_cross_entropy":
            _targets_onehot = self.model.transform_targets(targets, one_hot=True)
            _losses = [
                F.binary_cross_entropy_with_logits(
                    out_i, tgt_i, weight=self._class_weights
                )
                for out_i, tgt_i in zip(output, _targets_onehot)
            ]
            loss = sum(_losses)
        else:
            # BUG FIX: was `self.loss`, which is not a defined trait on this
            # HasStrictTraits class and would itself raise instead of
            # reporting the unsupported loss name.
            raise NotImplementedError(self.loss_func)

        # Assume only 1 output:

        return loss, output[0], _targets[0], _losses[0]

    def _calc_validation_loss(self):
        """Mean per-sample loss over the validation set (no grad)."""
        running_loss = 0
        self.model.eval()
        with torch.no_grad():
            for batch_idx, (data, *targets) in enumerate(self.dl_val):
                loss, _, _, _ = self._run_model_on_batch(data, targets)
                running_loss += loss.item() * data.size(0)

        return running_loss / len(self.dl_val.dataset)

    def _train_epoch(self):
        """Train for one (possibly scaled) epoch; returns loss/acc metrics."""
        self.model.train()

        train_losses = []
        train_accs = []

        for batch_idx, (data, *targets) in enumerate(self.dl_train):
            if (
                batch_idx * data.shape[0] * data.shape[2]
                > self.epoch_iters * self.epoch_size
            ):
                # we've effectively finished one epoch worth of data; break!
                break

            batch_loss, batch_output, batch_targets = self._train_batch(data, targets)
            train_losses.append(batch_loss.detach().cpu().item())
            batch_preds = torch.argmax(batch_output, 1, False)
            train_accs.append(
                (batch_preds == batch_targets).detach().cpu().float().mean().item()
            )

        if self.lr_decay != 1:
            self.lr_scheduler.step()

        return EpochMetrics(loss=np.mean(train_losses), acc=np.mean(train_accs))

    def _val_epoch(self):
        return self._eval_epoch(self.dl_val)

    def _eval_epoch(self, data_loader):
        """Evaluate on `data_loader`; returns loss, accuracy, weighted F1."""
        self.model.eval()

        losses = []
        outputs = []
        targets = []

        with torch.no_grad():
            for batch_idx, (data, *target) in enumerate(data_loader):
                (
                    batch_loss,
                    batch_output,
                    batch_targets,
                    _first_loss,  # renamed: was misleadingly called train_losses
                ) = self._run_model_on_batch(data, target)

                losses.append(batch_loss.detach().cpu().item())
                outputs.append(
                    torch.argmax(batch_output, 1, False)
                    .detach()
                    .cpu()
                    .data.numpy()
                    .flatten()
                )
                targets.append(batch_targets.detach().cpu().data.numpy().flatten())

        targets = np.hstack(targets)
        outputs = np.hstack(outputs)
        acc = sklearn.metrics.accuracy_score(targets, outputs)
        f1 = sklearn.metrics.f1_score(targets, outputs, average="weighted")

        return EpochMetrics(loss=np.mean(losses), acc=acc, f1=f1)

    def init_data(self):
        """Force lazy initialization of datasets and model."""
        _, _, _ = self.dl_train, self.dl_val, self.dl_test
        _ = self.model

    def init_train(self):
        """Seed RNGs (if a seed is set) and move the model to the device."""
        if self.seed:
            torch.manual_seed(self.seed)
        if self.cuda:
            if self.seed:
                torch.cuda.manual_seed(self.seed)
        self.model.to(self.device)

    def train_one_epoch(self, verbose=True) -> EpochRecord:
        """ Train a single epoch -- method tailored to the Ray.tune methodology."""
        epoch_record = EpochRecord(epoch=len(self.train_state.epoch_records))
        self.train_state.epoch_records.append(epoch_record)

        with Timer("Train Epoch", log_output=verbose) as timer:
            epoch_record.train = self._train_epoch()
        epoch_record.iter_s_cpu = timer.interval_cpu
        epoch_record.iter_s_wall = timer.interval_wall
        epoch_record.lr = self.optimizer.param_groups[0]["lr"]

        with Timer("Val Epoch", log_output=verbose):
            epoch_record.val = self._val_epoch()

        df = self.train_state.to_df()

        # Early stopping / checkpointing implementation:
        # smooth a combined loss/F1 metric and penalize instability so we
        # checkpoint on robust improvements rather than noise.
        df["raw_metric"] = df.val_loss / df.val_f1
        df["ewma_smoothed_loss"] = (
            df["raw_metric"].ewm(ignore_na=False, halflife=3).mean()
        )
        df["instability_penalty"] = (
            df["raw_metric"].rolling(5, min_periods=3).std().fillna(0.75)
        )
        stopping_metric = df["stopping_metric"] = (
            df["ewma_smoothed_loss"] + df["instability_penalty"]
        )
        epoch_record.stopping_metric = df["stopping_metric"].iloc[-1]

        idx_this_iter = stopping_metric.index.max()
        idx_best_yet = stopping_metric.idxmin()
        self.train_state.best_sm = df.loc[idx_best_yet, "stopping_metric"]
        self.train_state.best_loss = df.loc[idx_best_yet, "val_loss"]
        self.train_state.best_f1 = df.loc[idx_best_yet, "val_f1"]

        if idx_best_yet == idx_this_iter:
            # Best yet! Checkpoint.
            epoch_record.should_checkpoint = True
            self.cp_iter = epoch_record.epoch

        else:
            if self.patience is not None:
                patience_counter = idx_this_iter - idx_best_yet
                assert patience_counter >= 0
                if patience_counter > self.patience:
                    if verbose:
                        print(
                            f"Early stop! Out of patience ( {patience_counter} > {self.patience} )"
                        )
                    epoch_record.done = True

        if verbose:
            self.print_train_summary()

        return epoch_record

    def train(self, max_epochs=50, verbose=True):
        """ A pretty standard training loop, constrained to stop in `max_epochs` but may stop early if our
        custom stopping metric does not improve for `self.patience` epochs. Always checkpoints
        when a new best stopping_metric is achieved. An alternative to using
        ray.tune for training."""

        self.init_data()
        self.init_train()

        while True:
            epoch_record = self.train_one_epoch(verbose=verbose)

            if epoch_record.should_checkpoint:
                last_cp = self._save()
                if verbose:
                    print(f"<<<< Checkpointed ({last_cp}) >>>")
            if epoch_record.done:
                break
            if epoch_record.epoch >= max_epochs:
                break

        # Save trainer state, but not the model.
        self._save(save_model=False)
        if verbose:
            print(self.model_path)

    def print_train_summary(self):
        """Print the per-epoch training history as a wide DataFrame."""
        df = self.train_state.to_df()

        with pd.option_context(
            "display.max_rows",
            100,
            "display.max_columns",
            100,
            "display.precision",
            3,
            "display.width",
            180,
        ):
            print(df.drop(["done"], axis=1, errors="ignore"))

    def _save(self, checkpoint_dir=None, save_model=True, save_trainer=True):
        """ Saves/checkpoints model state and training state to disk.

        Note: an explicit `checkpoint_dir` also becomes the new model_path.
        """
        if checkpoint_dir is None:
            checkpoint_dir = self.model_path
        else:
            self.model_path = checkpoint_dir

        os.makedirs(checkpoint_dir, exist_ok=True)

        # save model params
        model_path = os.path.join(checkpoint_dir, "model.pth")
        trainer_path = os.path.join(checkpoint_dir, "trainer.pth")

        if save_model:
            torch.save(self.model.state_dict(), model_path)
        if save_trainer:
            with open(trainer_path, "wb") as f:
                pickle.dump(self, f)

        return checkpoint_dir

    def _restore(self, checkpoint_dir=None):
        """ Restores model state and training state from disk. """

        if checkpoint_dir is None:
            checkpoint_dir = self.model_path

        model_path = os.path.join(checkpoint_dir, "model.pth")
        trainer_path = os.path.join(checkpoint_dir, "trainer.pth")

        # Reconstitute old trainer and copy state to this trainer.
        with open(trainer_path, "rb") as f:
            other_trainer = pickle.load(f)

        self.__setstate__(other_trainer.__getstate__())

        # Load model (after loading state in case we need to re-initialize model from config)
        self.model.load_state_dict(torch.load(model_path, map_location=self.device))

        # Be careful to reinitialize optimizer and lr scheduler
        self.optimizer = self._optimizer_default()
        self.lr_scheduler = self._lr_scheduler_default()
Beispiel #23
0
class EEGSensor(t.HasTraits):
    # Optional preferences store; when present, the COM port is read from /
    # persisted to it (see _com_port_default / _com_port_changed).
    preferences = t.Any()
    # True once a serial connection to the sensor has been established.
    connected = t.Bool(False)
    # pyserial connection handle; None until connect() succeeds.
    serial_port = t.Instance(serial.Serial)

    # NOTE(review): t.Int() defaults to 0 and cannot actually hold None, so
    # the "If None" search path presumably never triggers — verify connect().
    com_port = t.Int()  # If None, we just search for it.

    def _com_port_default(self):
        """ Get default com port from preferences. """
        if self.preferences:
            return int(
                self.preferences.get('sensor.com_port', COM_SEARCH_START))
        return t.undefined

    def _com_port_changed(self, val):
        """ Save any COM port changes to preferences. """
        if self.preferences:
            return self.preferences.set('sensor.com_port', val)

    channels = t.Int(CHANNELS)
    # Rolling sample buffer: one column per channel plus one extra column
    # (assumed timestamp/sequence counter — TODO confirm against reader code).
    timeseries = t.Array(dtype='float', value=np.zeros([1, CHANNELS + 1]))
    history = t.List()
    # Fired when new data arrives; the length properties below depend on it.
    data_changed = t.Event()

    # Below, these separate properties and buttons for each channel are a bit
    #  verbose, but it seems to be the most clear way to implement this.
    channel_1_enabled = t.Bool(False)
    channel_2_enabled = t.Bool(False)
    channel_3_enabled = t.Bool(False)
    channel_4_enabled = t.Bool(False)
    channel_5_enabled = t.Bool(False)
    channel_6_enabled = t.Bool(False)
    channel_7_enabled = t.Bool(False)
    channel_8_enabled = t.Bool(False)

    # Per-channel activate buttons (handled by toggle_channels).
    channel_1_on = t.Button()
    channel_2_on = t.Button()
    channel_3_on = t.Button()
    channel_4_on = t.Button()
    channel_5_on = t.Button()
    channel_6_on = t.Button()
    channel_7_on = t.Button()
    channel_8_on = t.Button()

    # Per-channel deactivate buttons (handled by toggle_channels).
    channel_1_off = t.Button()
    channel_2_off = t.Button()
    channel_3_off = t.Button()
    channel_4_off = t.Button()
    channel_5_off = t.Button()
    channel_6_off = t.Button()
    channel_7_off = t.Button()
    channel_8_off = t.Button()

    # Properties
    # Number of entries in `history`; recomputed when data_changed fires.
    history_length = t.Property(t.Int, depends_on="data_changed")

    def _get_history_length(self):
        return len(self.history)

    # Row count of the `timeseries` buffer; recomputed when data_changed fires.
    timeseries_length = t.Property(t.Int, depends_on="data_changed")

    def _get_timeseries_length(self):
        return self.timeseries.shape[0]

    @t.on_trait_change(','.join(['channel_%d_on' % i for i in range(1, 9)] +
                                ['channel_%d_off' % i for i in range(1, 9)]))
    def toggle_channels(self, name, new):
        """ Send the activate/deactivate command for the channel button fired.

        Button trait names look like 'channel_3_on' / 'channel_3_off'; the
        digit just before the suffix selects the single-character command
        written to the serial link.
        """
        if not self.connected:
            return
        deactivate_codes = ['1', '2', '3', '4', '5', '6', '7', '8']
        activate_codes = ['q', 'w', 'e', 'r', 't', 'y', 'u', 'i']
        if name.endswith('_off'):
            channel_num = int(name[-len('_off') - 1])
            cmd = deactivate_codes[channel_num - 1]
        elif name.endswith('_on'):
            channel_num = int(name[-len('_on') - 1])
            cmd = activate_codes[channel_num - 1]
        else:
            raise ValueError()
        self.serial_port.write(cmd + '\n')
        # Give the device a moment to process the command.
        time.sleep(0.100)

    def connect(self):
        if self.connected:
            self.disconnect()

        assert self.serial_port is None

        # If no com port is selected, search for it... this search code could
        #  be drastically sped up by analyzing a listing of actual COM ports.
        try:
            if self.com_port is None:
                for i in range(COM_SEARCH_START, COM_SEARCH_END + 1):
                    try:
                        port = 'COM%d' % i
                        self.serial_port = serial.Serial(
                            port, BAUD, timeout=STARTUP_TIMEOUT)
                        if self.serial_port.read(1) == '':
                            self.serial_port.close()
                            self.serial_port = None
                            continue
                        else:
                            # Assume it's the right one...
                            self.serial_port.write('s\n')  # Reset.
                            self.serial_port.write(
                                'b\n')  # Start sending binary.
                            self.serial_port.read(
                                5)  # Make sure we can read something
                            # Okay, we're convinced.
                            self.com_port = i
                            self.connected = True
                            self.serial_port.timeout = RUN_TIMEOUT
                            break
                    except serial.SerialException, e:
                        logging.warn("Couldn't open %s: %s" % (port, str(e)))
                else:
                    logging.warn("Couldn't find a functioning serial port." %
                                 (port, str(e)))

            else:  # A specific COM port is requested.
Beispiel #24
0
class HistPlot(DescStatBasePlot):
    """Bar-chart histogram of consumer rating counts for one dataset row.

    Renders one bar per rating variable, with a percentage data label on
    each bar and axes labelled from the dataset's variable names.
    """

    row_id = _traits.Any()           # index label of the row being plotted
    ceiling = _traits.Int()          # y-axis upper bound (see _ceiling_default)
    head_space = _traits.Float(1.1)  # headroom factor above the tallest bar
    bars_renderer = _traits.Instance(_chaco.BarPlot)

    def __init__(self, ds, row_id):
        super(HistPlot, self).__init__(ds=ds, row_id=row_id)
        self.bars_renderer = self._create_render()
        self.add(self.bars_renderer)
        self._add_axis(self.bars_renderer)
        fraction = self._calc_percentage()
        self._add_data_labels(self.bars_renderer, fraction)

    def render_hist(self, row_id):
        """Switch the histogram to a different row of the dataset."""
        self.row_id = row_id
        # BUG FIX: DataFrame.ix was removed in pandas 1.0; .loc is the
        # label-based replacement (use .iloc instead if row_id were positional).
        self.bars_renderer.value.set_data(self.ds.mat.loc[self.row_id].values)

    def _create_render(self):
        """Build the BarPlot renderer with index/value ranges and mappers."""
        # Create our data sources
        idx = _chaco.ArrayDataSource(_np.arange(self.ds.n_vars))
        vals = _chaco.ArrayDataSource(self.ds.mat.loc[self.row_id].values)

        # Create the index range
        index_range = _chaco.DataRange1D(idx,
                                         tight_bounds=False,
                                         low_setting='auto',
                                         margin=0.15)
        index_mapper = _chaco.LinearMapper(range=index_range)

        # Create the value range
        value_range = _chaco.DataRange1D(vals,
                                         low_setting=0,
                                         high_setting=self.ceiling)
        value_mapper = _chaco.LinearMapper(range=value_range)

        # Create the plot
        bars = _chaco.BarPlot(index=idx,
                              value=vals,
                              value_mapper=value_mapper,
                              index_mapper=index_mapper,
                              line_color='black',
                              fill_color='springgreen',
                              bar_width=0.8,
                              antialias=False)

        return bars

    def _calc_percentage(self):
        """Return each bar's share of the row total, in percent."""
        hist = self.ds.mat.loc[self.row_id].values
        alt = hist.sum(axis=0)
        pec = hist * 100 / alt
        return pec

    def _add_data_labels(self, renderer, fraction):
        """Overlay a '<pct>%' label on top of each bar."""
        idx = renderer.index._data
        val = renderer.value._data
        for i, v in enumerate(fraction):
            label = _chaco.DataLabel(
                component=renderer,
                data_point=(idx[i], val[i]),
                label_text="{}%".format(v),
                marker_visible=False,
                border_visible=False,
                show_label_coords=False,
                bgcolor=(0.5, 0.5, 0.5, 0.0),
            )
            renderer.overlays.append(label)

    def _add_axis(self, renderer):
        """Attach the left (count) axis and bottom (rating label) axis."""
        left_axis = _chaco.PlotAxis(renderer,
                                    orientation='left',
                                    title='Number of consumers')
        bottom_axis = _chaco.LabelAxis(
            renderer,
            orientation='bottom',
            title='Consumer rating',
            positions=range(self.ds.n_vars),
            labels=[str(vn) for vn in self.ds.var_n],
            tick_interval=1.0,
        )
        renderer.underlays.append(left_axis)
        renderer.underlays.append(bottom_axis)

    def _ceiling_default(self):
        # Tallest bar in the whole dataset plus head_space headroom, so the
        # y-range is stable when switching rows.
        top = self.ds.values.max()
        return int(top * self.head_space)

    def new_window(self, configure=False):
        """Convenience function that creates a window containing the Plot

        Don't call this if the plot is already displayed in a window.
        """
        from chaco.ui.plot_window import PlotWindow
        if configure:
            self._plot_ui_info = PlotWindow(plot=self).configure_traits()
        else:
            self._plot_ui_info = PlotWindow(plot=self).edit_traits()
        return self._plot_ui_info

    def get_plot_name(self):
        """Window title for this plot (dataset display name + row id)."""
        dsn = self.ds.display_name[19:]
        return u"Histogram plot: {0} - {1}".format(dsn, self.row_id)