def export_ticket_types(self, template_path):
        """Export ticket types by saving type JSON data in ticket.xml file.
        
        Creates a dictionary where each key is a ticket type and the value
        is ticket type information. We then iterate over this to create a XML
        file using ElementTree lib.
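
        Example of the resulting file (layout inferred from the code below;
        the type name and JSON payload are illustrative):

            <ticket_types project="myproject" date="2013-05-01">
                <type_name name="defect">{"name": "defect", ...}</type_name>
            </ticket_types>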
        """

        # a list to return to the template with info about transaction
        successful_exports = list()

        types = [ticket_type.name for ticket_type in model.Type.select(self.env)]

        ticket_types_dict = dict()
        controller = LogicaOrderController(self.env)
        for ticket_type in types:
            # using a _method() is a bit naughty
            ticket_types_dict[ticket_type] = controller._serialize_ticket_type(ticket_type)

        # create the XML tree
        self.log.info("Creating ticket type XML file for template archive")

        root = ET.Element("ticket_types", project=self.env.project_name, date=datetime.date.today().isoformat())
        for type_name, type_info in ticket_types_dict.iteritems():
            ET.SubElement(root, "type_name", name=type_name).text = type_info
            successful_exports.append(type_name)

        # create the xml file
        filename = os.path.join(template_path, 'ticket.xml')
        ET.ElementTree(root).write(filename)
        self.log.info("File %s has been created at %s" % (filename, template_path))

        return successful_exports

    def closed_statuses_for_all_types(self):
        """Returns a dictionary where the keys are tickets types and the associated
        values are statuses from workflow status groups where closed='True'. 

        Essentially if a ticket is in one of these statuses, we consider it closed
        and from this infer that no more work is required to complete the ticket.
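
        Example return value (shape only; the type and status names are
        illustrative):

            ({'defect': ['closed', 'rejected']},
             ['defect', 'closed', 'rejected'])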
        """

        controller = LogicaOrderController(self.env)
        closed_statuses = controller.type_and_statuses_for_closed_statusgroups()
        types_and_statuses = []
        for type_, statuses in closed_statuses.iteritems():
            types_and_statuses.append(type_)
            types_and_statuses.extend(statuses)

        return closed_statuses, types_and_statuses

    def _get_ticket_data(self, req, results):
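        """Filter out closed tickets and shape the remainder for the client.

        Skips any result whose status counts as closed for its ticket type,
        keeps only the fields listed in self.fields, coerces hours and effort
        to floats (falling back to 0) and resolves the reporter's display
        name from their session data.
        """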
        ats = AgileToolsSystem(self.env)
        loc = LogicaOrderController(self.env)
        closed_statuses = loc.type_and_statuses_for_closed_statusgroups()

        # TODO calculate which statuses are closed using the query system
        # when it is able to handle this
        tickets = []
        for result in results:
            if result['status'] not in closed_statuses[result['type']]:
                filtered_result = dict((k, v)
                                   for k, v in result.iteritems()
                                   if k in self.fields)

                if "remaininghours" in filtered_result:
                    try:
                        hours = float(filtered_result["remaininghours"])
                    except (ValueError, TypeError):
                        hours = 0
                    del filtered_result["remaininghours"]
                else:
                    hours = 0

                if "effort" in filtered_result:
                    try:
                        storypoints = float(filtered_result['effort'])
                    except (ValueError, TypeError):
                        storypoints = 0
                else:
                    storypoints = 0

                reporter = filtered_result["reporter"]
                session = DetachedSession(self.env, reporter)

                filtered_result.update({
                    'id': result['id'],
                    'position': ats.position(result['id']),
                    'hours': hours,
                    'effort': storypoints,
                    'reporter': session.get('name', reporter),
                    'changetime': to_utimestamp(filtered_result['changetime'])
                    })

                tickets.append(filtered_result)

        return tickets

    def import_ticket_types(self, template_path):
        """Imports ticket types from ticket.xml template file.

        Create ticket types using the import functionality from 
        LogicaOrderController and data from a ticket type template XML.
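
        The file is expected to have the structure written by
        export_ticket_types(): a ticket_types root element whose children
        each carry one serialized ticket type as their text.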
        """

        # get ticket info in JSON format from XML file
        controller = LogicaOrderController(self.env)

        self.log.info("Creating ticket types from template")
        path = os.path.join(template_path, 'ticket.xml')
        try:
            tree = ET.ElementTree(file=path)
            for ticket in tree.getroot():
                # using a _method() is a bit naughty
                controller._import_ticket_type(ticket.text, dry_run=False)
        except IOError as exception:
            if exception.errno == errno.ENOENT:
                self.log.info("Path to ticket.xml at %s does not exist. "
                              "Unable to import tickets from template.", path)
            else:
                raise

    def _get_status_data(self, req, milestone, field, results, fields):
        """Get data grouped by WORKFLOW and status.

        It's not possible to show tickets in different workflows on the same
        taskboard, so we create an additional outer group for workflows.
        We then get the workflow with the most tickets, and show that first"""
        ats = AgileToolsSystem(self.env)
        loc = LogicaOrderController(self.env)

        # Data for the status grouping is much more complex, as we need to
        # track the workflow
        tickets_json = defaultdict(lambda: defaultdict(dict))
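        # tickets_json ends up shaped like (names illustrative):
        #   {workflow_name: {status: {ticket_id: {...ticket fields...}}}}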
        by_type = defaultdict(int)
        by_wf = defaultdict(int)
        wf_for_type = {}

        # Store the options required in order to complete an action
        # E.g. closing a ticket requires a resolution
        act_controls = {}

        for r in results:
            # Increment type statistics
            by_type[r['type']] += 1
            tkt = Ticket(self.env, r['id'])
            if r['type'] not in wf_for_type:
                wf_for_type[r['type']] = \
                    loc._get_workflow_for_typename(r['type'])
            wf = wf_for_type[r['type']]

            state = loc._determine_workflow_state(tkt, req=req)
            op = Operation(self.env, wf, state)
            filtered = dict((k, v)
                            for k, v in r.iteritems()
                            if k in fields)
            filtered['position'] = ats.position(r['id'])
            filtered['_changetime'] = to_utimestamp(r['changetime'])
            # we use Trac's to_json() (through add_script_data), so
            # we'll replace any types which can't be json serialised
            for k, v in filtered.items():
                if isinstance(v, datetime.datetime):
                    filtered[k] = pretty_age(v)
            filtered['actions'] = self._get_status_actions(req, op, wf, state)
            # Collect all actions requiring further input
            self._update_controls(req, act_controls, filtered['actions'], tkt)

            tickets_json[wf.name][r["status"]][r["id"]] = filtered

        # Calculate number of tickets per workflow
        for ty in by_type:
            by_wf[wf_for_type[ty]] += by_type[ty]

        wf_statuses = dict((wf.name, wf.ordered_statuses) for wf in by_wf)

        # Retrieve Kanban-style status limits
        db = self.env.get_read_db()
        cursor = db.cursor()
        cursor.execute("""
            SELECT status, hardlimit FROM kanban_limits
            WHERE milestone = %s""", (milestone,))
        status_limits = dict((limit[0], limit[1]) for limit in cursor)
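        # e.g. {'in_progress': 5, 'review': 2} -- status names and limits
        # are illustrative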

        # Initially show the most used workflow
        show_first = max(by_wf, key=lambda n: by_wf[n]).name
        return ("status", tickets_json, wf_statuses, status_limits, show_first, act_controls)
        def _capture(db):
            def calculate_initial_values_for_ticket(ticket_id):
                # the first snapshot is built from the very first information
                # we have about this ticket
                initial_values_cursor = db.cursor()
                initial_values_cursor.execute("SELECT time FROM ticket WHERE id = %s", (ticket_id,))
                ticket_created = from_utimestamp(initial_values_cursor.fetchone()[0])
                history_date = ticket_created.date()

                # find original values for the ticket
                for column in ticket_values.keys():
                    initial_values_cursor.execute("SELECT oldvalue FROM ticket_change WHERE ticket = %s AND field = %s ORDER BY time LIMIT 1",  
                                   (ticket_id, column))
                    result = initial_values_cursor.fetchone()
                    if result is None:
                        if column in built_in_fields:
                            initial_values_cursor.execute("SELECT %s FROM ticket WHERE id = %%s" % column, (ticket_id,))
                            result = initial_values_cursor.fetchone()
                            ticket_values[column] = encode_and_escape(result[0])
                        else:
                            initial_values_cursor.execute("SELECT value FROM ticket_custom WHERE ticket = %s AND name = %s", (ticket_id, column))
                            result = initial_values_cursor.fetchone()
                            if result:
                                ticket_values[column] = encode_and_escape(result[0])
                            else:
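                                # r'\N' is the NULL marker in PostgreSQL's
                                # COPY format, which this capture uses to load
                                # the data (see copy_data_buffer below)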
                                ticket_values[column] = r'\N'
                    else:
                        ticket_values[column] = encode_and_escape(result[0])

                ticket_values['id'] = str(ticket_id)
                ticket_values['time'] = str(ticket_created)
                ticket_values['changetime'] = str(ticket_created)
                ticket_values['_resolutiontime'] = r'\N'
                # assumption that you cannot create a ticket in status closed
                # so we give isclosed a false value from the off
                ticket_values['isclosed'] = "0"

                # PROBLEM: How can we detect when a milestone was renamed (and
                # tickets updated) - this isn't mentioned in the ticket_change
                # table.
                # Maybe we have to search the log file for strings?!  
                # source:trunk/trac/trac/ticket/model.py@8937#L1192

                return ticket_values, ticket_created, history_date

            last_snapshot_cursor = db.cursor()
            last_snapshot_cursor.execute("SELECT _snapshottime FROM ticket_bi_historical ORDER BY _snapshottime DESC LIMIT 1")
            last_snapshot_result = last_snapshot_cursor.fetchone()
            del last_snapshot_cursor
            if last_snapshot_result:
                last_snapshot = last_snapshot_result[0]
                print "Last successful run was at %s" % last_snapshot

                if until <= last_snapshot:
                    print "Already have data for %s, so can't run with until=%s" % (last_snapshot, until)
                    return False
            else:
                last_snapshot = None
                print "No previous runs"

            # Get statuses we consider to be closed for each ticket type
            controller = LogicaOrderController(self.env)
            closed_statuses = controller.type_and_statuses_for_closed_statusgroups()

            if only_ticket:
                ticket_ids = [(int(only_ticket),)]
            else:
                ticket_ids_c = db.cursor()
                ticket_ids_c.execute("SELECT id FROM ticket ORDER BY id")
                ticket_ids = ticket_ids_c.fetchall()

            insert_cursor = db.cursor()
            copy_data_buffer = StringIO()

            eta_prediction__start_time = datetime.datetime.now()
            eta_prediction__done  = 0
            eta_prediction__total = len(ticket_ids)
            for ticket_id, in ticket_ids:
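                # predicted finish: start time plus the average time per
                # ticket so far, multiplied by the total number of tickets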
                try:
                    elapsed = datetime.datetime.now() - eta_prediction__start_time
                    eta_prediction = (eta_prediction__start_time +
                                      (elapsed / eta_prediction__done) *
                                      eta_prediction__total)
                except ZeroDivisionError:
                    eta_prediction = None
                self.log.info("Working on (after) %s to (end of) %s for ticket %d/%d - ETA %s", 
                              last_snapshot,
                              until,
                              ticket_id,
                              eta_prediction__total,
                              eta_prediction)
                eta_prediction__done += 1

                # set up a dictionary to hold the value of the ticket fields, which will change as we step forward in time
                ticket_values = proto_values.copy()
                
                # populate the "initial" values
                if last_snapshot:
                    history_date = last_snapshot + datetime.timedelta(days=1)
                    last_snapshot_cursor = db.cursor()
                    # we select both the ticket fields and the history
                    # columns, otherwise we don't get previous values such
                    # as isclosed
                    columns = ticket_values.keys() + history_columns
                    last_snapshot_cursor.execute("SELECT %s FROM ticket_bi_historical "
                                                 "WHERE id = %%s AND _snapshottime = %%s "
                                                 "LIMIT 1 "
                                                 %  ",".join(columns), 
                                                 (ticket_id, last_snapshot))

                    values = last_snapshot_cursor.fetchone()
                    if not values:
                        self.log.warn("No historical data for ticket %s on %s?", ticket_id, last_snapshot)
                        ticket_values, ticket_created, history_date = calculate_initial_values_for_ticket(ticket_id)
                    else:
                        ticket_values.update(dict(zip(columns, values)))

                        # original storage for custom_fields can only store strings, so pretend we had a string
                        for k, v in ticket_values.items():
                            # everything wants to be a string as we use COPY to insert to the database
                            if k in custom_fields and not v:
                                ticket_values[k] = ''
                            else:
                                ticket_values[k] = encode_and_escape(v)

                        ticket_values['id'] = str(ticket_id)

                else:
                    # first time we've run the history capture script
                    ticket_values, ticket_created, history_date = calculate_initial_values_for_ticket(ticket_id)

                # now we're going to get a list of all the changes that this ticket goes through

                ticket_changes_cursor = db.cursor()
                ticket_changes_cursor.execute("SELECT time, field, newvalue "
                               "FROM ticket_change "
                               "WHERE ticket = %%s AND field in (%s) "
                               "AND time >= %%s AND time < %%s "
                               "ORDER BY time "
                               % db.parammarks(len(ticket_values)),
                               [ticket_id]
                               + ticket_values.keys()
                               + [memoized_to_utimestamp(startofday(history_date)),
                                  memoized_to_utimestamp(startofnextday(until)),
                                  ]
                               )
                ticket_changes = [(from_utimestamp(time), field, newvalue)
                                  for time, field, newvalue in ticket_changes_cursor]

                # and then we'll update 'ticket_values' to make a representation of the ticket for the end of each day, and store that into the history database

                def _calculate_totalhours_on_date(date, cursor):
                    cursor.execute("SELECT SUM(seconds_worked)/3600.0 FROM ticket_time "
                                   "WHERE ticket = %s AND time_started < %s",
                                   (ticket_values['id'],
                                    memoized_to_timestamp(startofnextday(date))))
                    result = cursor.fetchone()
                    # SUM() returns NULL when no time has been logged yet
                    return result[0] if result and result[0] is not None else 0

                def _calculate_remaininghours_on_date(date, cursor):
                    # find the recorded value whose timestamp is closest to
                    # the end of 'date', considering the next change, the
                    # previous change and the current custom field value
                    nextdate = startofnextday(date)
                    cursor.execute("SELECT to_timestamp(time / 1000000), oldvalue FROM ticket_change WHERE "
                              "field = 'remaininghours' AND ticket = %s AND time >= %s ORDER BY time ASC LIMIT 1",
                              (ticket_values['id'],
                               memoized_to_utimestamp(nextdate)))
                    next_known = cursor.fetchone()
                    cursor.execute("SELECT to_timestamp(time / 1000000), newvalue FROM ticket_change WHERE "
                              "field = 'remaininghours' AND ticket = %s AND time < %s ORDER BY time DESC LIMIT 1",
                              (ticket_values['id'],
                               memoized_to_utimestamp(nextdate)))
                    previous_known = cursor.fetchone()
                    cursor.execute("SELECT now(), value FROM ticket_custom WHERE ticket = %s AND name = 'remaininghours'",
                              (ticket_values['id'],))
                    currently = cursor.fetchone()
                    self.log.debug("Finding remaininghours for end of %s", date)
                    self.log.debug("Previous known value: %s", previous_known)
                    self.log.debug("Current known value: %s", currently)
                    self.log.debug("Next known value: %s", next_known)
                    candidates = []
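                    # each candidate: (distance from the end of 'date',
                    # timestamp at which the value is known, value)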
                    try:
                        candidates.append((currently[0] - nextdate, currently[0], float(currently[1])))
                    except (TypeError, ValueError), e:
                        self.log.warning("Invalid float in %s for remaininghours on ticket %s", 
                                         currently, ticket_values['id'])
                    if next_known:
                        try:
                            candidates.append((next_known[0] - nextdate, next_known[0], float(next_known[1])))
                        except (TypeError, ValueError), e:
                            self.log.warning("Invalid float for next_known in %s for remaininghours on ticket %s", 
                                             next_known, ticket_values['id'])
                    if previous_known:
                        try:
                            candidates.append((nextdate - previous_known[0], previous_known[0], float(previous_known[1])))
                        except (TypeError, ValueError), e:
                            self.log.warning("Invalid float for previous_known in %s for remaininghours on ticket %s", 
                                             previous_known, ticket_values['id'])