Example #1
0
    def restore(self, backup_id, user_id):
        """
        Restores the database dump over the existing setup

        :param backup_id: Backup identifier, as an integer
        :param user_id: Id the user who did the restore

        Returns:
            A dictionary of the restored backup
        """
        backup = {}

        assert isinstance(user_id, long), 'User id needs to be long integer'
        assert isinstance(backup_id, long), 'Backup id needs to be long integer'

        # Create dictionary containing the info about the backup
        backup = {'id':backup_id, 'restored':datetime.utcnow(), 'restored_by':user_id}

        # Open the db connection for adding the restore information, if any of the operations fail,
        # the database transaction will be rolled back in the context manager
        with admin_transaction() as cursor:
            # Update restore into to project_backup table. Use result count to check if the id was
            # actually found or not
            query = '''
                UPDATE project_backup
                SET restored=%s, restored_by=%s
                WHERE id = %s
            '''
            cursor.execute(query, (backup['restored'], backup['restored_by'] , backup['id']))

            # Check if the backup_id was actually found?
            if not cursor.rowcount:
                raise TracError('Backup cannot be found')

            # Do the actual database restore
            try:
                mysqlp = self._get_mysql_process(self.env)
            except OSError, e:
                raise TracError(_("Unable to run mysql command: %(msg)s", msg=exception_to_unicode(e)))

            # Pass the backup into stdin
            backup_path = self.backup_path_tmpl % (self.project.env_name, backup['id'])

            if not os.path.exists(backup_path):
                conf.log.error('User failed to restore project backup')
                raise TracError(_('Backup file cannot be found'))

            with open(backup_path, 'r+b') as backup_input:
                errmsg = mysqlp.communicate(input=backup_input.read())

            if mysqlp.returncode != 0:
                msg = _('Restoring the database backup failed: %(msg)s', msg=to_unicode(errmsg.strip()))
                conf.log.error(msg)
                raise TracError(msg)
Example #2
0
    def generate_help(self, target, repo, inline=False, visibility=''):
        """Show documentation for named `target`.

        If `inline` is set, no header will be generated.
        For the `visibility` argument, see `PyDocMacro`.
        """
        parser = WikidocParser(self.env, repo)
        html = ''

        # An empty target or a trailing '*' requests an index page;
        # anything else is a single document.
        if target == '':
            doc = parser.index(target)
        elif target[-1] == '*':
            doc = parser.index(target[:-1])
        else:
            doc = parser.document(target)

        # The original code had an if/else whose branches were identical:
        # both index and document pages emit the same header, so a single
        # check suffices.
        if not inline:
            html = '<h1 class="pod">%s</h1>\n\n' % doc.title

        # Optional link to the documented source
        if doc.src_link != '':
            src = ' [<a href="%s" class="podindex">%s</a>]' % (doc.src_link,
                                                               doc.src_link)
        else:
            src = ''

        # Breadcrumb path built from the '::'-separated components
        links = self._link_components(target)
        if len(links) == 0:
            html = html + '<p class="podpath">%s %s</p>\n' % ('@INC', src)
        else:
            links.insert(0, '<a href="%s">@INC</a>' % self.env.href.wikidoc())
            links[-1] = target.split('::')[-1]  # Unlink the current page
            html = html + '<p class="podpath">%s %s</p>\n' % ('::'.join(links),
                                                              src)

        return to_unicode(html) + to_unicode(doc.html_index) + to_unicode(
            doc.html)
Example #3
0
    def post_process_request(self, req, template, data, content_type):
        """Filter timeline events by a tag query expression.

        Only acts on the /timeline view for users holding TAGS_VIEW. The
        query comes from the request args or is recalled from the session;
        events whose resource matches the query replace ``data['events']``.
        Returns nothing; mutates ``data`` and ``req.session`` in place.
        """
        if data and req.path_info == '/timeline' and \
                'TAGS_VIEW' in req.perm(Resource('tags')):

            # Attribute handler so 'realm:...' terms in the query match
            # against the realm of the resource currently being tested.
            def realm_handler(_, node, context):
                return query.match(node, [context.realm])

            query_str = req.args.get(self.key)
            if query_str is None and req.args.get('format') != 'rss':
                # No explicit query in this request: recall the one stored
                # in the session (skipped for RSS feeds).
                query_str = req.session.get('timeline.%s' % self.key)
            else:
                query_str = (query_str or '').strip()
                # Record tag query expression between visits.
                req.session['timeline.%s' % self.key] = query_str

            if data.get('events') and query_str:
                tag_system = TagSystem(self.env)
                try:
                    query = Query(query_str,
                                  attribute_handlers=dict(realm=realm_handler)
                            )
                except InvalidQuery, e:
                    # Bad expression: warn the user, leave events unfiltered.
                    add_warning(req, _("Tag query syntax error: %s"
                                       % to_unicode(e)))
                else:
                    all_realms = tag_system.get_taggable_realms(req.perm)
                    query_realms = set()
                    for m in REALM_RE.finditer(query.as_string()):
                        query_realms.add(m.group(1))
                    # Don't care about resources from non-taggable realms.
                    realms = not query_realms and all_realms or \
                             query_realms.intersection(all_realms)
                    events = []
                    self.log.debug("Filtering timeline events by tags '%s'",
                                   query_str)
                    for event in data['events']:
                        resource = event['data'][0]
                        if resource.realm in realms:
                            # Shortcut view permission checks here.
                            tags = tag_system.get_tags(None, resource)
                            if query(tags, context=resource):
                                events.append(event)
                    # Overwrite with filtered list.
                    data['events'] = events
            if query_str:
                # Add current value for next form rendering.
                data[self.key] = query_str
            elif self.key in req.session:
                del req.session[self.key]
Example #4
0
	def generate_help(self, target, repo, inline=False, visibility=''):
		"""Show documentation for named `target`.

		If `inline` is set, no header will be generated.
		For the `visibility` argument, see `PyDocMacro`.
		"""
		parser = WikidocParser(self.env, repo)
		html = ''

		# An empty target or a trailing '*' requests an index page;
		# anything else is a single document.
		if target == '':
			doc = parser.index(target)
		elif target[-1] == '*':
			doc = parser.index(target[:-1])
		else:
			doc = parser.document(target)

		# Both branches of the original if/else emitted the same header,
		# so the duplicated check is collapsed into one.
		if not inline:
			html = '<h1 class="pod">%s</h1>\n\n' % doc.title

		# Optional link to the documented source
		if doc.src_link != '':
			src = ' [<a href="%s" class="podindex">%s</a>]' % (doc.src_link, doc.src_link)
		else:
			src = ''

		# Breadcrumb path built from the '::'-separated components
		links = self._link_components(target)
		if len(links) == 0:
			html = html + '<p class="podpath">%s %s</p>\n' % ('@INC', src)
		else:
			links.insert(0,'<a href="%s">@INC</a>' % self.env.href.wikidoc())
			links[-1] = target.split('::')[-1] # Unlink the current page
			html = html + '<p class="podpath">%s %s</p>\n' % ('::'.join(links), src)

		return to_unicode(html) + to_unicode(doc.html_index) + to_unicode(doc.html)
Example #5
0
    def post_process_request(self, req, template, data, content_type):
        """Filter /timeline events down to those matching a tag query.

        Runs only for the timeline view and users holding TAGS_VIEW. The
        query expression is taken from the request args or recalled from
        the session; matching events replace ``data['events']`` in place.
        """
        if data and req.path_info == '/timeline' and \
                'TAGS_VIEW' in req.perm(Resource('tags')):

            # Attribute handler: lets 'realm:...' query terms match against
            # the realm of the resource being tested.
            def realm_handler(_, node, context):
                return query.match(node, [context.realm])

            query_str = req.args.get(self.key)
            if query_str is None and req.args.get('format') != 'rss':
                # No query supplied: reuse the one remembered in the session
                # (skipped for RSS feeds).
                query_str = req.session.get('timeline.%s' % self.key)
            else:
                query_str = (query_str or '').strip()
                # Record tag query expression between visits.
                req.session['timeline.%s' % self.key] = query_str

            if data.get('events') and query_str:
                tag_system = TagSystem(self.env)
                try:
                    query = Query(query_str,
                                  attribute_handlers=dict(realm=realm_handler))
                except InvalidQuery, e:
                    # Bad expression: warn the user, leave events unfiltered.
                    add_warning(
                        req, _("Tag query syntax error: %s" % to_unicode(e)))
                else:
                    all_realms = tag_system.get_taggable_realms(req.perm)
                    query_realms = set()
                    for m in REALM_RE.finditer(query.as_string()):
                        query_realms.add(m.group(1))
                    # Don't care about resources from non-taggable realms.
                    realms = not query_realms and all_realms or \
                             query_realms.intersection(all_realms)
                    events = []
                    self.log.debug("Filtering timeline events by tags '%s'" %
                                   query_str)
                    for event in data['events']:
                        resource = event['data'][0]
                        if resource.realm in realms:
                            # Shortcut view permission checks here.
                            tags = tag_system.get_tags(None, resource)
                            if query(tags, context=resource):
                                events.append(event)
                    # Overwrite with filtered list.
                    data['events'] = events
            if query_str:
                # Add current value for next form rendering.
                data[self.key] = query_str
            elif self.key in req.session:
                del req.session[self.key]
Example #6
0
    def process_request(self, req):
        """Render the tag cloud / tag listing page at /tags.

        Requires TAGS_VIEW. Legacy /tags/<name> URLs redirect to the query
        form; an empty query renders the tag cloud, a non-empty one lists
        the matching tagged objects, restricted to the realms checked in
        the form.
        """
        req.perm.require('TAGS_VIEW')
        add_ctxtnav(req, 'Cloud', req.href.tags())
        match = re.match(r'/tags/?(.*)', req.path_info)
        if match.group(1):
            # Old-style /tags/<tag> URL: redirect to the query form.
            req.redirect(req.href('tags', q=match.group(1)))
        add_stylesheet(req, 'tags/css/tractags.css')
        query = req.args.get('q', '')
        data = {'title': 'Tags'}
        formatter = Formatter(self.env,
                              Context.from_request(req, Resource('tag')))

        # Realms whose checkbox appears in the request args are considered
        # checked; when none were submitted, default to all realms.
        realms = [p.get_taggable_realm() for p in self.tag_providers]
        checked_realms = [r for r in realms if r in req.args] or realms
        data['tag_realms'] = [{
            'name': realm,
            'checked': realm in checked_realms
        } for realm in realms]

        if query:
            data['tag_title'] = 'Showing objects matching "%s"' % query
        data['tag_query'] = query

        # Local import, presumably to avoid a circular dependency at module
        # load time -- TODO confirm.
        from tractags.macros import TagCloudMacro, ListTaggedMacro
        if not query:
            macro = TagCloudMacro(self.env)
        else:
            macro = ListTaggedMacro(self.env)
        # Restrict the query to the checked realms.
        query = '(%s) (%s)' % (' or '.join(
            ['realm:' + r for r in realms if r in checked_realms]), query)
        self.env.log.debug('Tag query: %s', query)
        try:
            data['tag_body'] = macro.expand_macro(formatter, None, query)
        except InvalidQuery, e:
            # Fall back to the plain cloud and surface the error on the page.
            data['tag_query_error'] = to_unicode(e)
            data['tag_body'] = TagCloudMacro(self.env) \
                .expand_macro(formatter, None, '')
        # NOTE(review): no (template, data, content_type) tuple is returned
        # here; the snippet may be truncated -- confirm against full source.
Example #7
0
    def process_request(self, req):
        req.perm.require('TAGS_VIEW')
        add_ctxtnav(req, 'Cloud', req.href.tags())
        match = re.match(r'/tags/?(.*)', req.path_info)
        if match.group(1):
            req.redirect(req.href('tags', q=match.group(1)))
        add_stylesheet(req, 'tags/css/tractags.css')
        query = req.args.get('q', '')
        data = {'title': 'Tags'}
        formatter = Formatter(
            self.env, Context.from_request(req, Resource('tag'))
            )

        realms = [p.get_taggable_realm() for p in self.tag_providers]
        checked_realms = [r for r in realms if r in req.args] or realms
        data['tag_realms'] = [{'name': realm, 'checked': realm in checked_realms}
                              for realm in realms]

        if query:
            data['tag_title'] = 'Showing objects matching "%s"' % query
        data['tag_query'] = query

        from tractags.macros import TagCloudMacro, ListTaggedMacro
        if not query:
            macro = TagCloudMacro(self.env)
        else:
            macro = ListTaggedMacro(self.env)
        query = '(%s) (%s)' % (' or '.join(['realm:' + r for r in realms
                                            if r in checked_realms]), query)
        self.env.log.debug('Tag query: %s', query)
        try:
            data['tag_body'] =  macro.expand_macro(formatter, None, query)
        except InvalidQuery, e:
            data['tag_query_error'] = to_unicode(e)
            data['tag_body'] = TagCloudMacro(self.env) \
                .expand_macro(formatter, None, '')
    def process_request(self, req):
        """Render a ticket dependency graph for a ticket or a milestone.

        Reads ``realm`` and ``id`` from the request args, collects the
        ticket ids to graph, and either sends the graph directly (text,
        debug dump, an allowed render format, or PNG) or returns the
        depgraph.html template with the graph in ``data``.
        """
        realm = req.args['realm']
        id = req.args['id']  # (shadows the id() builtin within this method)

        #Urls to generate the depgraph for a ticket is /depgraph/ticketnum
        #Urls to generate the depgraph for a milestone is /depgraph/milestone/milestone_name

        #List of tickets to generate the depgraph for
        tkt_ids = []
        if realm == 'milestone':
            #we need to query the list of tickets in the milestone
            query = Query(self.env, constraints={'milestone': [id]}, max=0)
            tkt_ids = [fields['id'] for fields in query.execute(req)]
        else:
            #the list is a single ticket
            tkt_ids = [int(id)]

        #the summary argument defines whether we place the ticket id or
        #its summary in the node's label
        label_summary = 0
        if 'summary' in req.args:
            label_summary = int(req.args.get('summary'))

        g = self._build_graph(req, tkt_ids, label_summary=label_summary)
        if req.path_info.endswith('/depgraph.png') or 'format' in req.args:
            format = req.args.get('format')
            # NOTE(review): the branches below rely on req.send terminating
            # the request, so the PNG path only runs when format is absent
            # or unmatched -- confirm against the Request.send contract.
            if format == 'text':
                #in case g.__str__ returns unicode, we need to convert it in ascii
                req.send(to_unicode(g).encode('ascii', 'replace'), 'text/plain')
            elif format == 'debug':
                import pprint

                req.send(
                    pprint.pformat(
                        [TicketLinks(self.env, tkt_id) for tkt_id in tkt_ids]
                    ),
                    'text/plain')
            elif format is not None:
                if format in self.acceptable_formats:
                    req.send(g.render(self.dot_path, format), 'text/plain')
                else:
                    raise TracError(_("The %(format)s format is not allowed.", format=format))

            # PNG output: render via ghostscript for anti-aliasing when
            # available, otherwise let dot render directly.
            if self.use_gs:
                ps = g.render(self.dot_path, 'ps2')
                gs = subprocess.Popen(
                    [self.gs_path, '-q', '-dTextAlphaBits=4', '-dGraphicsAlphaBits=4', '-sDEVICE=png16m',
                     '-sOutputFile=%stdout%', '-'],
                    stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                img, err = gs.communicate(ps)
                if err:
                    self.log.debug('MasterTickets: Error from gs: %s', err)
            else:
                img = g.render(self.dot_path)
            req.send(img, 'image/png')
        else:
            data = {}

            #add a context link to enable/disable labels in nodes
            if label_summary:
                add_ctxtnav(req, 'Without labels', req.href(req.path_info, summary=0))
            else:
                add_ctxtnav(req, 'With labels', req.href(req.path_info, summary=1))

            if realm == 'milestone':
                add_ctxtnav(req, 'Back to Milestone: %s' % id, req.href.milestone(id))
                data['milestone'] = id
            else:
                data['ticket'] = id
                add_ctxtnav(req, 'Back to Ticket #%s' % id, req.href.ticket(id))
            data['graph'] = g
            data['graph_render'] = partial(g.render, self.dot_path)
            data['use_gs'] = self.use_gs

            return 'depgraph.html', data, None
Example #9
0
    def process_request(self, req):
        """Handle ticket-relation management requests.

        Requires a ticket ``id`` request argument and TICKET_MODIFY on the
        source ticket. A POST may remove selected relations ('remove' arg)
        or add one ('add' arg); adding additionally requires TICKET_MODIFY
        on the destination ticket. Validation errors are collected into
        ``data['error']`` rather than raised.

        :raises TracError: on a missing/invalid ticket id or unknown operation
        """
        tid = req.args.get('id')
        if not tid:
            raise TracError(_('No ticket id provided.'))

        try:
            ticket = Ticket(self.env, tid)
        except ValueError:
            raise TracError(_('Invalid ticket id.'))

        # For access to the relation management, TICKET_MODIFY is required.
        req.perm.require('TICKET_MODIFY')
        relsys = RelationsSystem(self.env)

        data = {
            'relation': {},
        }
        if req.method == 'POST':
            # for modifying the relations TICKET_MODIFY is required for
            # both the source and the destination tickets

            if 'remove' in req.args:
                rellist = req.args.get('sel')
                if rellist:
                    # A single selected checkbox arrives as a bare string;
                    # normalize it to a one-element list.
                    if isinstance(rellist, basestring):
                        rellist = [
                            rellist,
                        ]
                    self.remove_relations(req, rellist)
            elif 'add' in req.args:
                relation = dict(
                    destination=req.args.get('dest_tid', ''),
                    type=req.args.get('reltype', ''),
                    comment=req.args.get('comment', ''),
                )
                try:
                    trs = TicketRelationsSpecifics(self.env)
                    dest_ticket = trs.find_ticket(relation['destination'])
                except NoSuchTicketError:
                    data['error'] = _('Invalid ticket ID.')
                else:
                    req.perm.require('TICKET_MODIFY', Resource(dest_ticket.id))

                    try:
                        dbrel = relsys.add(ticket, dest_ticket,
                                           relation['type'],
                                           relation['comment'], req.authname)
                    except NoSuchTicketError:
                        data['error'] = _('Invalid ticket ID.')
                    except UnknownRelationType:
                        data['error'] = _('Unknown relation type.')
                    except ValidationError as ex:
                        data['error'] = ex.message
                    else:
                        # Notify. A notification failure must not undo the
                        # already-saved relation, so it is logged and the
                        # user only gets a warning.
                        try:
                            self.notify_relation_changed(dbrel)
                        except Exception as e:
                            # Fixed: the concatenated string literals were
                            # missing a separating space ("oncreation").
                            self.log.error(
                                "Failure sending notification on "
                                "creation of relation: %s",
                                exception_to_unicode(e))
                            add_warning(
                                req,
                                _(
                                    "The relation has been added, "
                                    "but an error occurred while "
                                    "sending notifications: "
                                    "%(message)s",
                                    message=to_unicode(e)))

                if 'error' in data:
                    # Re-populate the form with the rejected input.
                    data['relation'] = relation
            else:
                raise TracError(_('Invalid operation.'))
Example #10
0
    def process_request(self, req):
        """Render a ticket dependency graph addressed via the URL path.

        Parses the ticket number or milestone name out of req.path_info,
        collects the ticket ids to graph, then either sends the graph
        directly (text, debug dump, a named render format, or PNG) or
        returns the depgraph.html template with the graph in ``data``.

        :raises TracError: if the path contains no ticket/milestone part
        """
        path_info = req.path_info[10:]

        if not path_info:
            raise TracError('No ticket specified')

        #list of tickets to generate the depgraph for
        tkt_ids = []
        milestone = None
        split_path = path_info.split('/', 2)

        #Urls to generate the depgraph for a ticket is /depgraph/ticketnum
        #Urls to generate the depgraph for a milestone is /depgraph/milestone/milestone_name
        if split_path[0] == 'milestone':
            #we need to query the list of tickets in the milestone
            milestone = split_path[1]
            query = Query(self.env,
                          constraints={'milestone': [milestone]},
                          max=0)
            tkt_ids = [fields['id'] for fields in query.execute()]
        else:
            #the list is a single ticket
            tkt_ids = [int(split_path[0])]

        #the summary argument defines whether we place the ticket id or
        #its summary in the node's label
        label_summary = 0
        if 'summary' in req.args:
            label_summary = int(req.args.get('summary'))

        g = self._build_graph(req, tkt_ids, label_summary=label_summary)
        if path_info.endswith('/depgraph.png') or 'format' in req.args:
            format = req.args.get('format')
            # NOTE(review): these branches rely on req.send terminating the
            # request, so the PNG path only runs when format is absent --
            # confirm against the Request.send contract.
            if format == 'text':
                #in case g.__str__ returns unicode, we need to convert it in ascii
                req.send(
                    to_unicode(g).encode('ascii', 'replace'), 'text/plain')
            elif format == 'debug':
                import pprint
                req.send(
                    pprint.pformat(
                        [TicketLinks(self.env, tkt_id) for tkt_id in tkt_ids]),
                    'text/plain')
            elif format is not None:
                # NOTE(review): format is passed to render unvalidated here;
                # confirm whether a whitelist check is needed.
                req.send(g.render(self.dot_path, format), 'text/plain')

            # PNG output: render via ghostscript for anti-aliasing when
            # available, otherwise let dot render directly.
            if self.use_gs:
                ps = g.render(self.dot_path, 'ps2')
                gs = subprocess.Popen([
                    self.gs_path, '-q', '-dTextAlphaBits=4',
                    '-dGraphicsAlphaBits=4', '-sDEVICE=png16m',
                    '-sOutputFile=%stdout%', '-'
                ],
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                img, err = gs.communicate(ps)
                if err:
                    self.log.debug('MasterTickets: Error from gs: %s', err)
            else:
                img = g.render(self.dot_path)
            req.send(img, 'image/png')
        else:
            data = {}

            #add a context link to enable/disable labels in nodes
            if label_summary:
                add_ctxtnav(req, 'Without labels',
                            req.href(req.path_info, summary=0))
            else:
                add_ctxtnav(req, 'With labels',
                            req.href(req.path_info, summary=1))

            if milestone is None:
                tkt = Ticket(self.env, tkt_ids[0])
                data['tkt'] = tkt
                add_ctxtnav(req, 'Back to Ticket #%s' % tkt.id,
                            req.href.ticket(tkt.id))
            else:
                add_ctxtnav(req, 'Back to Milestone %s' % milestone,
                            req.href.milestone(milestone))
            data['milestone'] = milestone
            data['graph'] = g
            data['graph_render'] = partial(g.render, self.dot_path)
            data['use_gs'] = self.use_gs

            return 'depgraph.html', data, None
    def import_files(self, dry_run=False):
        """Import legacy download files into the Files storage.

        trac-admin command helper: reads rows from the old download table,
        copies each file into the new files area (duplicate filenames are
        placed into per-id subdirectories), creates the matching download
        records, and finally enables the downloads glue component. With
        ``-n`` / ``--dry-run`` it only prints what would be done.

        :param dry_run: either a boolean, or the raw command-line flag
            ('-n' / '--dry-run') which is normalized to a boolean below
        """
        dry_run = True if dry_run in ['-n', '--dry-run'] else False
        try:
            env_name = self.env.project_identifier
        except AttributeError:
            # Since open_environment is not used in trac-admin commands
            # we need to manually set the project_identifier
            env_name = self.env.path.split('/')[-1]
            self.env.project_identifier = env_name
        download_data_list = self.get_download_data()
        path = conf.getEnvironmentDownloadsPath(self.env)
        if download_data_list is None:
            printout("env:%(env_name)s, download table was not found" %
                     {'env_name': self.env.project_identifier})
            return
        files_core = FilesCoreComponent(self.env)
        node_factory, download_config = files_core.files_node_factory_and_config(
        )
        env_name = download_config.env_name

        # First pass: group download ids by filename so duplicates can be
        # detected; first_file remembers the first id seen per filename.
        project_files = {}
        first_file = {}
        for download_data in download_data_list:
            filename = download_data['file']
            id_ = download_data['id']
            if filename not in project_files:
                project_files[filename] = []
                first_file[filename] = id_
            project_files[filename].append(id_)

        # Second pass: copy each download into place and create its record.
        for download_data in download_data_list:
            filename = download_data['file']
            id_ = download_data['id']
            if not download_data['author_id']:
                # Cannot attribute the download to a user: skip it.
                printout(
                    "env:%(env_name)s file:%(download)s id:%(id_)s: "
                    "The author %(author)s of download %(download)s was not found."
                    % {
                        'env_name': env_name,
                        'download': filename,
                        'id_': id_,
                        'author': download_data['author']
                    })
                continue
            base_downloads_path = filesystem.safe_path(path, to_unicode(id_))
            original_node = FileSystemNode(base_downloads_path)
            original_node.populate_file_data(filename)
            from_path = original_node._abs_path_encoded
            existing_node = MappedFileNode.from_download_path(
                filename, node_factory, True)
            download_path = filename
            if len(project_files[filename]) > 1:
                # Duplicate filenames: place each copy under an id-specific
                # download path instead of the root.
                download_path = get_download_path(id_, filename)
                to_node = MappedFileNode.from_download_path(
                    download_path, node_factory, True)
            else:
                # No duplicate downloads, put it into root
                to_node = existing_node
            if not to_node.is_download():
                printout(
                    "env:%(env_name)s file:%(download)s id:%(id_)s: "
                    "With %(rel_path)s: Download information is incorrect" % {
                        'env_name': env_name,
                        'download': filename,
                        'id_': id_,
                        'rel_path': to_node.relative_path
                    })
                continue
            if to_node.download().is_available():
                printout(
                    "env:%(env_name)s file:%(download)s id:%(id_)s: "
                    "With %(rel_path)s: The download information is already available"
                    % {
                        'env_name': env_name,
                        'download': filename,
                        'id_': id_,
                        'rel_path': to_node.relative_path
                    })
                continue
            elif to_node.exists():
                printout(
                    "env:%(env_name)s file:%(download)s id:%(id_)s: "
                    "With %(rel_path)s: The download already exists" % {
                        'env_name': env_name,
                        'download': filename,
                        'id_': id_,
                        'rel_path': to_node.relative_path
                    })
                continue
            can_be_removed = False
            download = self.populate_new_download(to_node.download(),
                                                  original_node, download_data)
            if len(project_files[filename]) > 1:
                # If there were duplicate filenames, special handling for them is needed
                if (existing_node.exists() and existing_node.is_file()
                        and existing_node.is_download()):
                    old_download = existing_node.download()
                    if (old_download.is_available()
                            and old_download.hash == download.hash
                            and old_download.version == 1 and
                            download.uploader_id == old_download.uploader_id
                            and download.created == old_download.created):
                        # Copy all information, which might be changed
                        download.clone_user_values(old_download)
                        download.count = old_download.count
                        can_be_removed = True
                    else:
                        # Else, we just accept that there has been changes
                        # Download count might be duplicated. In that case, manual work
                        # could be done.
                        printout(
                            "env:%(env_name)s file:%(download)s id:%(id_)s: "
                            "Cannot remove download because it is not original or has changed, "
                            "download count was %(count)s" % {
                                'env_name': env_name,
                                'id_': id_,
                                'download': filename,
                                'count': download.count
                            })
            if not dry_run:
                # Ensure the parent directory exists for nested download paths
                if os.path.sep in download_path:
                    parent_dir = to_node.get_parent_dir()
                    if not parent_dir.exists():
                        data = {'type': 'dir'}
                        FileSystemNode.create_check(parent_dir, data)
                        FileSystemNode.create_do(parent_dir, data)
                        FileSystemNode.create_post_process(parent_dir, data)
                shutil.copy2(from_path, to_node._abs_path_encoded)
                to_node.chmod()
                self.save_new_download(download)
                if can_be_removed:
                    # The original was an exact duplicate: drop it completely.
                    existing_node.download().delete_completely()
                    existing_node.remove_do({})
            else:
                printout(
                    "env:%(env_name)s file:%(download)s id:%(id_)s: "
                    "Would copy file to %(download_path)s%(other)s" % {
                        'env_name':
                        env_name,
                        'id_':
                        id_,
                        'download':
                        filename,
                        'download_path':
                        to_node.download().download_path,
                        'other':
                        can_be_removed and ', and would also remove original'
                        or ''
                    })

        # Finally, enable the glue component so the imported downloads are
        # actually served (skipped on dry runs).
        was_enabled = False
        if not self.env.is_component_enabled(DOWNLOADS_GLUE_COMPONENT):
            if not dry_run:
                self.env.config.set('components', DOWNLOADS_GLUE_COMPONENT,
                                    'enabled')
                self.env.config.save()
            was_enabled = True
        if download_data_list:
            if was_enabled:
                printout(
                    "env:%(env_name)s: downloads handled, component %(component)s enabled."
                    % {
                        'env_name': env_name,
                        'component': DOWNLOADS_GLUE_COMPONENT
                    })
            else:
                printout("env:%(env_name)s: downloads handled." %
                         {'env_name': env_name})
        else:
            printout(
                "env:%(env_name)s: no downloads found, component %(component)s enabled."
                % {
                    'env_name': env_name,
                    'component': DOWNLOADS_GLUE_COMPONENT
                })
Example #12
0
    def process_request(self, req):
        """Render the team calendar and, on POST, save timetable updates.

        Requires TEAMCALENDAR_VIEW.  The displayed interval comes from the
        'from_date'/'to_date' request args and falls back to a default
        window around today when missing or invalid.  Returns the usual
        Trac (template, data, content_type) triple.
        """
        req.perm.require('TEAMCALENDAR_VIEW')
        pid = self.pm.get_current_project(req)
        syllabus_id = req.data['syllabus_id']
        self.pm.check_component_enabled(self, syllabus_id=syllabus_id)

        # Per-syllabus settings: which weekdays count as work days and the
        # default window size (in weeks) before/after today.
        work_days = [int(d) for d in self.work_days.syllabus(syllabus_id)]
        weeks_prior = self.weeks_prior.syllabus(syllabus_id)
        weeks_after = self.weeks_after.syllabus(syllabus_id)

        data = {}

        from_date = req.args.get('from_date', '')
        to_date   = req.args.get('to_date', '')
        from_date = from_date and parse_date_only(from_date) or self.find_default_start(weeks_prior)
        to_date   = to_date   and parse_date_only(to_date)   or self.find_default_end(weeks_after)

        # Check time interval: reject negative or oversized ranges and warn.
        force_default = True
        delta = (to_date - from_date).days
        if delta < 0:
            add_warning(req, _('Negative time interval selected. Using default.'))
        elif delta > self.MAX_INTERVAL:
            add_warning(req, _('Too big time interval selected (%(interval)s). '
                               'Using default.', interval=pretty_timedelta(to_date, from_date)))
        else:
            force_default = False

        # Reset interval to default
        if force_default:
            from_date = self.find_default_start(weeks_prior)
            to_date   = self.find_default_end(weeks_after)

        # Message shown after a successful update (empty by default)
        data['message'] = ''

        # Current user
        data['authname'] = authname = req.authname

        # Can we update?  Own entries only, or everyone's.

        data['can_update_own']    = can_update_own    = ('TEAMCALENDAR_UPDATE_OWN'    in req.perm)
        data['can_update_others'] = can_update_others = ('TEAMCALENDAR_UPDATE_OTHERS' in req.perm)
        data['can_update']        = can_update_own or can_update_others

        # Store dates
        data['today']     = date.today()
        data['from_date'] = from_date
        data['to_date']   = to_date

        # Get all people
        data['people'] = people = self.pm.get_project_users(pid)

        # Update timetable if required
        if 'update_calendar' in req.args:
            req.perm.require('TEAMCALENDAR_UPDATE_OWN')

            # deliberately override dates: want to show result of update
            from_date = current_date = parse_date_only(req.args.get('orig_from_date', ''))
            to_date   = parse_date_only(req.args.get('orig_to_date', ''))
            tuples = []
            # Collect (date, person, availability) rows from the submitted
            # form; field names are '<iso-date>.<person>'.  A missing field
            # presumably yields Decimal(False) == 0 -- TODO confirm.
            while current_date <= to_date:
                if can_update_others:
                    for person in people:
                        status = Decimal(req.args.get(u'%s.%s' % (current_date.isoformat(), person), False))
                        tuples.append((current_date, person, status,))
                elif can_update_own:
                    status = Decimal(req.args.get(u'%s.%s' % (current_date.isoformat(), authname), False))
                    tuples.append((current_date, authname, status,))
                current_date += timedelta(1)

            self.update_timetable(tuples, pid, from_date, to_date)
            data['message'] = _('Timetable updated.')

        # Get the current timetable
        timetable = self.get_timetable(from_date, to_date, people, pid, work_days)

        # One row per day in the interval, each carrying per-person data.
        data['timetable'] = []
        current_date = from_date
        while current_date <= to_date:
            data['timetable'].append(dict(date=current_date, people=timetable[current_date]))
            current_date += timedelta(1)

        for day in data['timetable']:
            day['strdate'] = to_unicode(day['date'].strftime('%a %d/%m/%Y'))

        # jQuery UI datepicker assets for the date-range form.
        add_stylesheet(req, 'common/css/jquery-ui/jquery.ui.core.css')
        add_stylesheet(req, 'common/css/jquery-ui/jquery.ui.datepicker.css')
        add_stylesheet(req, 'common/css/jquery-ui/jquery.ui.theme.css')
        add_script(req, 'common/js/jquery.ui.core.js')
        add_script(req, 'common/js/jquery.ui.widget.js')
        add_script(req, 'common/js/jquery.ui.datepicker.js')
        add_script(req, 'common/js/datepicker.js')

        add_stylesheet(req, 'teamcalendar/css/calendar.css')

        # Expose the translation function to the template.
        data['_'] = _
        return 'teamcalendar.html', data, None
Exemple #13
0
          except Exception, e:
              content = tag.html(tag.body(tag.strong("DB Error: " + unicode(e))))
          html = content.generate().render("xhtml")
          req.send_response(200)
          req.send_header('Cache-control', 'must-revalidate')
          req.send_header('Content-Type', 'text/html;charset=utf-8')
          req.send_header('Content-Length', len(html))
          req.end_headers()

          if req.method != 'HEAD':
             req.write(html)
          raise RequestDone

        try:
            hash = req.path_info[9:-3]
            mm = to_unicode(self._get_cache(hash)).encode('utf-8')
            req.send_response(200)
            req.send_header('Cache-control', 'must-revalidate')
            req.send_header('Content-Type', 'application/x-freemind')
            req.send_header('Content-Length', len(mm))
            req.end_headers()
            if req.method != 'HEAD':
              req.write( mm )
        except RequestDone:
            pass
        except Exception, e:
            self.log.error(e)
            req.send_response(500)
            try:
              req.end_headers()
              req.write( str(e) )
    def import_files(self, dry_run=False):
        """Import legacy download-table files into the Files plugin.

        trac-admin command entry point.  With '-n'/'--dry-run' only prints
        what would be done.  Copies each download into its Files location,
        migrates its metadata, and finally makes sure
        DOWNLOADS_GLUE_COMPONENT is enabled.
        """
        # trac-admin passes the raw option string, not a boolean.
        dry_run = True if dry_run in ['-n', '--dry-run'] else False
        try:
            env_name = self.env.project_identifier
        except AttributeError:
            # Since open_environment is not used in trac-admin commands
            # we need to manually set the project_identifier
            env_name = self.env.path.split('/')[-1]
            self.env.project_identifier = env_name
        download_data_list = self.get_download_data()
        path = conf.getEnvironmentDownloadsPath(self.env)
        if download_data_list is None:
            # No legacy download table in this environment: nothing to do.
            printout("env:%(env_name)s, download table was not found" %
                     {'env_name': self.env.project_identifier})
            return
        files_core = FilesCoreComponent(self.env)
        node_factory, download_config = files_core.files_node_factory_and_config()
        env_name = download_config.env_name

        # Group download ids by filename so duplicates can be detected;
        # first_file remembers the first id seen per filename.
        project_files = {}
        first_file = {}
        for download_data in download_data_list:
            filename = download_data['file']
            id_ = download_data['id']
            if filename not in project_files:
                project_files[filename] = []
                first_file[filename] = id_
            project_files[filename].append(id_)

        for download_data in download_data_list:
            filename = download_data['file']
            id_ = download_data['id']
            # Skip downloads whose author no longer resolves to a user id.
            if not download_data['author_id']:
                printout("env:%(env_name)s file:%(download)s id:%(id_)s: "
                         "The author %(author)s of download %(download)s was not found." %
                         {'env_name':env_name, 'download': filename, 'id_': id_,
                          'author':download_data['author']})
                continue
            # Legacy layout: each download lives under a directory named
            # after its numeric id.
            base_downloads_path = filesystem.safe_path(path, to_unicode(id_))
            original_node = FileSystemNode(base_downloads_path)
            original_node.populate_file_data(filename)
            from_path = original_node._abs_path_encoded
            existing_node = MappedFileNode.from_download_path(filename, node_factory, True)
            download_path = filename
            if len(project_files[filename]) > 1:
                # Duplicate filenames: give each copy an id-qualified path.
                download_path = get_download_path(id_, filename)
                to_node = MappedFileNode.from_download_path(download_path, node_factory, True)
            else:
                # No duplicate downloads, put it into root
                to_node = existing_node
            if not to_node.is_download():
                printout("env:%(env_name)s file:%(download)s id:%(id_)s: "
                         "With %(rel_path)s: Download information is incorrect" %
                         {'env_name':env_name, 'download': filename, 'id_': id_,
                          'rel_path':to_node.relative_path})
                continue
            # Already migrated (metadata available) or target file exists:
            # leave it alone.
            if to_node.download().is_available():
                printout("env:%(env_name)s file:%(download)s id:%(id_)s: "
                         "With %(rel_path)s: The download information is already available" %
                         {'env_name':env_name, 'download': filename, 'id_': id_,
                          'rel_path':to_node.relative_path})
                continue
            elif to_node.exists():
                printout("env:%(env_name)s file:%(download)s id:%(id_)s: "
                         "With %(rel_path)s: The download already exists" %
                         {'env_name':env_name, 'download': filename, 'id_': id_,
                          'rel_path':to_node.relative_path})
                continue
            can_be_removed = False
            download = self.populate_new_download(to_node.download(), original_node,
                download_data)
            if len(project_files[filename]) > 1:
                # If there were duplicate filenames, special handling for them is needed
                if (existing_node.exists() and existing_node.is_file()
                        and existing_node.is_download()):
                    old_download = existing_node.download()
                    # The root copy can be removed only when it is provably
                    # the same upload (same hash, uploader and timestamp).
                    if (old_download.is_available() and old_download.hash == download.hash
                        and old_download.version == 1
                        and download.uploader_id == old_download.uploader_id
                        and download.created == old_download.created):
                        # Copy all information, which might be changed
                        download.clone_user_values(old_download)
                        download.count = old_download.count
                        can_be_removed = True
                    else:
                        # Else, we just accept that there has been changes
                        # Download count might be duplicated. In that case, manual work
                        # could be done.
                        printout("env:%(env_name)s file:%(download)s id:%(id_)s: "
                             "Cannot remove download because it is not original or has changed, "
                             "download count was %(count)s" %
                             {'env_name':env_name, 'id_': id_, 'download': filename,
                              'count': download.count})
            if not dry_run:
                # Create the parent directory when the target path is
                # nested (id-qualified duplicates).
                if os.path.sep in download_path:
                    parent_dir = to_node.get_parent_dir()
                    if not parent_dir.exists():
                        data = {'type': 'dir'}
                        FileSystemNode.create_check(parent_dir, data)
                        FileSystemNode.create_do(parent_dir, data)
                        FileSystemNode.create_post_process(parent_dir, data)
                shutil.copy2(from_path, to_node._abs_path_encoded)
                to_node.chmod()
                self.save_new_download(download)
                if can_be_removed:
                    existing_node.download().delete_completely()
                    existing_node.remove_do({})
            else:
                printout("env:%(env_name)s file:%(download)s id:%(id_)s: "
                         "Would copy file to %(download_path)s%(other)s" %
                         {'env_name':env_name, 'id_': id_, 'download': filename,
                          'download_path': to_node.download().download_path,
                          'other': can_be_removed and ', and would also remove original' or ''})

        # Make sure the glue component is enabled (skipped on dry runs,
        # but still reported below).
        was_enabled = False
        if not self.env.is_component_enabled(DOWNLOADS_GLUE_COMPONENT):
            if not dry_run:
                self.env.config.set('components', DOWNLOADS_GLUE_COMPONENT, 'enabled')
                self.env.config.save()
            was_enabled = True
        if download_data_list:
            if was_enabled:
                printout("env:%(env_name)s: downloads handled, component %(component)s enabled."
                         %{'env_name': env_name, 'component': DOWNLOADS_GLUE_COMPONENT})
            else:
                printout("env:%(env_name)s: downloads handled." % {'env_name': env_name})
        else:
            printout("env:%(env_name)s: no downloads found, component %(component)s enabled."
                    %{'env_name': env_name, 'component': DOWNLOADS_GLUE_COMPONENT})
Exemple #15
0
    def _parse_heading(self, formatter, match, fullmatch):
        """Parse a numbered-heading wiki match and render it as <hN>.

        Keeps an outline counter stack per formatter, supports an explicit
        number override and an explicit anchor in the markup, and appends
        the heading to the formatter's outline.
        """
        # NOTE(review): 'shorten' is assigned but never used in this body.
        shorten = False
        match = match.strip()

        # Heading level, capped at h6.
        depth = min(len(fullmatch.group('nhdepth')), 6)

        # Best effort: close any block-level construct that is still open.
        try:
            formatter.close_table()
            formatter.close_paragraph()
            formatter.close_indentation()
            formatter.close_list()
            formatter.close_def_list()
        except:
            pass

        ## BEGIN of code provided by Joshua Hoke, see th:#4521.
        # moved and modified by Martin

        # Figure out headline numbering for outline
        counters = self.outline_counters.get(formatter, [])

        if formatter not in self.outline_counters:
            self.outline_counters[formatter] = counters

        if len(counters) < depth:
            # First heading at this depth: pad skipped levels with 0.
            delta = depth - len(counters)
            counters.extend([0] * (delta - 1))
            counters.append(1)
        else:
            # Equal or shallower level: drop deeper counters, increment.
            del counters[depth:]
            counters[-1] += 1
        ## END

        # Optional explicit number ('1.2.') and anchor markup.
        num = fullmatch.group('nheadnum') or ''
        anchor = fullmatch.group('nhanchor') or ''
        # Heading text sits between the '=' fences, number and anchor.
        heading_text = match[depth + 1 + len(num):-depth - 1 -
                             len(anchor)].strip()

        num = num.strip()
        if num and num[-1] == '.':
            num = num[:-1]
        if num:
            # Explicit override; self._int presumably returns -1 for
            # non-numeric parts -- leading -1 parts are skipped below.
            numbers = [self._int(n) for n in num.split('.')]
            if len(numbers) == 1:
                counters[depth - 1] = numbers[0]
            else:
                if len(numbers) > depth:
                    del numbers[depth:]
                n = 0
                while numbers[n] == -1:
                    n = n + 1
                counters[depth - len(numbers[n:]):] = numbers[n:]

        if not heading_text:
            return tag()

        heading = format_to_oneliner(formatter.env, formatter.context,
                                     heading_text, False)

        if anchor:
            anchor = anchor[1:]
        else:
            # Derive an anchor from the rendered heading's plain text.
            sans_markup = plaintext(heading, keeplinebreaks=False)
            anchor = WikiParser._anchor_re.sub('', sans_markup)
            if not anchor or anchor[0].isdigit() or anchor[0] in '.-':
                # an ID must start with a Name-start character in XHTML
                anchor = 'a' + anchor  # keeping 'a' for backward compat
        # Deduplicate anchors by appending a running numeric suffix.
        i = 1
        anchor_base = anchor
        while anchor in formatter._anchors:
            anchor = anchor_base + str(i)
            i += 1
        formatter._anchors[anchor] = True

        # Add number directly if CSS is not used
        s = self.startatleveltwo and 1 or 0
        #self.env.log.debug('NHL:' + str(counters))
        while s < len(counters) and counters[s] == 0:
            s = s + 1

        oheading_text = heading_text
        heading_text = '.'.join(map(str, counters[s:]) + [" "]) + heading_text

        if self.number_outline:
            oheading_text = heading_text

        heading = format_to_oneliner(formatter.env, formatter.context,
                                     heading_text, False)
        oheading = format_to_oneliner(formatter.env, formatter.context,
                                      oheading_text, False)

        ## BEGIN of code provided by Joshua Hoke, see th:#4521.
        # modified by Martin

        # Strip out link tags
        oheading = re.sub(r'</?a(?: .*?)?>', '', oheading)

        try:
            # Add heading to outline
            formatter.outline.append((depth, anchor, oheading))
        except AttributeError:
            # Probably a type of formatter that doesn't build an
            # outline.
            pass
        ## END of provided code

        html = tag.__getattr__('h' + str(depth))(heading, id=anchor)
        if self.fix_paragraph:
            return '</p>' + to_unicode(html) + '<p>'
        else:
            return html
Exemple #16
0
    def process_request(self, req):
        tid = req.args.get('id')
        if not tid:
            raise TracError(_('No ticket id provided.'))

        try:
            ticket = Ticket(self.env, tid)
        except ValueError:
            raise TracError(_('Invalid ticket id.'))

        req.perm.require('TICKET_VIEW')
        relsys = RelationsSystem(self.env)

        data = {
            'relation': {},
        }
        if req.method == 'POST':
            # for modifying the relations TICKET_MODIFY is required for
            # both the source and the destination tickets
            req.perm.require('TICKET_MODIFY')

            if 'remove' in req.args:
                rellist = req.args.get('sel')
                if rellist:
                    if isinstance(rellist, basestring):
                        rellist = [rellist, ]
                    self.remove_relations(req, rellist)
            elif 'add' in req.args:
                relation = dict(
                    destination=req.args.get('dest_tid', ''),
                    type=req.args.get('reltype', ''),
                    comment=req.args.get('comment', ''),
                )
                try:
                    trs = TicketRelationsSpecifics(self.env)
                    dest_ticket = trs.find_ticket(relation['destination'])
                except NoSuchTicketError:
                    data['error'] = _('Invalid ticket ID.')
                else:
                    req.perm.require('TICKET_MODIFY', Resource(dest_ticket.id))

                    try:
                        dbrel = relsys.add(ticket, dest_ticket,
                            relation['type'],
                            relation['comment'],
                            req.authname)
                    except NoSuchTicketError:
                        data['error'] = _('Invalid ticket ID.')
                    except UnknownRelationType:
                        data['error'] = _('Unknown relation type.')
                    except ValidationError as ex:
                        data['error'] = ex.message
                    else:
                        # Notify
                        try:
                            self.notify_relation_changed(dbrel)
                        except Exception, e:
                            self.log.error("Failure sending notification on"
                                           "creation of relation: %s",
                                           exception_to_unicode(e))
                            add_warning(req, _("The relation has been added, but an "
                                               "error occurred while sending"
                                               "notifications: " "%(message)s",
                                               message=to_unicode(e)))

                if 'error' in data:
                    data['relation'] = relation
            else:
                raise TracError(_('Invalid operation.'))
Exemple #17
0
    def process_request(self, req):
        """Serve a ticket/milestone dependency graph.

        URLs: /depgraph/<ticket-id> and /depgraph/milestone/<name>.  With a
        'format' arg or a /depgraph.png suffix the graph is sent directly
        via req.send() (which raises RequestDone); otherwise the
        interactive depgraph.html template is returned.
        """
        realm = req.args['realm']
        id_ = req.args['id']

        # Fail early if the graphviz 'dot' executable is not usable.
        if not which(self.dot_path):
            raise TracError(
                _("Path to dot executable is invalid: %(path)s",
                  path=self.dot_path))

        # Urls to generate the depgraph for a ticket is /depgraph/ticketnum
        # Urls to generate the depgraph for a milestone is
        # /depgraph/milestone/milestone_name

        # List of tickets to generate the depgraph.
        if realm == 'milestone':
            # We need to query the list of tickets in the milestone
            query = Query(self.env, constraints={'milestone': [id_]}, max=0)
            tkt_ids = [fields['id'] for fields in query.execute(req)]
        else:
            tid = as_int(id_, None)
            if tid is None:
                raise TracError(
                    tag_("%(id)s is not a valid ticket id.", id=html.tt(id_)))
            tkt_ids = [tid]

        # The summary argument defines whether we place the ticket id or
        # its summary in the node's label
        label_summary = 0
        if 'summary' in req.args:
            label_summary = int(req.args.get('summary'))

        g = self._build_graph(req, tkt_ids, label_summary=label_summary)
        if req.path_info.endswith('/depgraph.png') or 'format' in req.args:
            format_ = req.args.get('format')
            if format_ == 'text':
                # In case g.__str__ returns unicode, convert it in ascii
                req.send(
                    to_unicode(g).encode('ascii', 'replace'), 'text/plain')
            elif format_ == 'debug':
                import pprint

                req.send(
                    pprint.pformat(
                        [TicketLinks(self.env, tkt_id) for tkt_id in tkt_ids]),
                    'text/plain')
            elif format_ is not None:
                if format_ in self.acceptable_formats:
                    req.send(g.render(self.dot_path, format_), 'text/plain')
                else:
                    raise TracError(
                        _("The %(format)s format is not allowed.",
                          format=format_))

            # PNG rendering: optionally post-process dot's ps2 output with
            # ghostscript for anti-aliased output.
            if self.use_gs:
                ps = g.render(self.dot_path, 'ps2')
                gs = subprocess.Popen([
                    self.gs_path, '-q', '-dTextAlphaBits=4',
                    '-dGraphicsAlphaBits=4', '-sDEVICE=png16m',
                    '-sOutputFile=%stdout%', '-'
                ],
                                      stdin=subprocess.PIPE,
                                      stdout=subprocess.PIPE,
                                      stderr=subprocess.PIPE)
                img, err = gs.communicate(ps)
                if err:
                    self.log.debug('MasterTickets: Error from gs: %s', err)
            else:
                img = g.render(self.dot_path)
            req.send(img, 'image/png')
        else:
            data = {}

            # Add a context link to enable/disable labels in nodes.
            if label_summary:
                add_ctxtnav(req, 'Without labels',
                            req.href(req.path_info, summary=0))
            else:
                add_ctxtnav(req, 'With labels',
                            req.href(req.path_info, summary=1))

            # Context navigation back to the originating ticket/milestone.
            if realm == 'milestone':
                add_ctxtnav(req, 'Back to Milestone: %s' % id_,
                            req.href.milestone(id_))
                data['milestone'] = id_
            else:
                data['ticket'] = id_
                add_ctxtnav(req, 'Back to Ticket #%s' % id_,
                            req.href.ticket(id_))
            data['graph'] = g
            data['graph_render'] = functools.partial(g.render, self.dot_path)
            data['use_gs'] = self.use_gs

            return 'depgraph.html', data, None
Exemple #18
0
    def process_request(self, req):
        """Render the tag cloud / tag query page.

        Handles /tags and /tags/<tag>.  Complex tag ids and single-tag
        queries are redirected to their canonical URLs; otherwise either
        the ListTagged or the TagCloud macro is expanded into the page
        body.
        """
        req.perm.require('TAGS_VIEW')

        match = re.match(r'/tags/?(.*)', req.path_info)
        tag_id = match.group(1) and match.group(1) or None
        query = req.args.get('q', '')

        # Only realms whose provider grants 'view' to this user.
        realms = [p.get_taggable_realm() for p in self.tag_providers
                  if (not hasattr(p, 'check_permission') or \
                      p.check_permission(req.perm, 'view'))]
        # Without a tag/query or explicit realm selection, pre-check all
        # realms except the configured exclusions.
        if not (tag_id or query) or [r for r in realms if r in req.args] == []:
            for realm in realms:
                if not realm in self.exclude_realms:
                    req.args[realm] = 'on'
        checked_realms = [r for r in realms if r in req.args]
        realm_args = dict(
            zip([r for r in checked_realms], ['on' for r in checked_realms]))
        if tag_id and not re.match(r"""(['"]?)(\S+)\1$""", tag_id, re.UNICODE):
            # Convert complex, invalid tag ID's to query expression.
            req.redirect(req.href.tags(realm_args, q=tag_id))
        elif query:
            single_page = re.match(r"""(['"]?)(\S+)\1$""", query, re.UNICODE)
            if single_page:
                # Convert simple query for single tag ID.
                req.redirect(req.href.tags(single_page.group(2), realm_args))

        data = dict(page_title=_("Tags"), checked_realms=checked_realms)
        # Populate the TagsQuery form field.
        data['tag_query'] = tag_id and tag_id or query
        data['tag_realms'] = list(
            dict(name=realm, checked=realm in checked_realms)
            for realm in realms)
        if tag_id:
            page_name = tag_id
            page = WikiPage(self.env, page_name)
            data['tag_page'] = page

        macros = TagWikiMacros(self.env)
        if query or tag_id:
            # TRANSLATOR: The meta-nav link label.
            add_ctxtnav(req, _("Back to Cloud"), req.href.tags())
            macro = 'ListTagged'
            args = '%s,format=%s,cols=%s,realm=%s' \
                   % (tag_id and tag_id or query, self.default_format,
                      self.default_cols, '|'.join(checked_realms))
            data['mincount'] = None
        else:
            macro = 'TagCloud'
            mincount = as_int(req.args.get('mincount', None),
                              self.cloud_mincount)
            args = 'mincount=%s,realm=%s' % (mincount,
                                             '|'.join(checked_realms))
            data['mincount'] = mincount
        formatter = Formatter(self.env,
                              Context.from_request(req, Resource('tag')))
        self.env.log.debug('Tag macro arguments: %s', args)
        try:
            # Query string without realm throws 'NotImplementedError'.
            data['tag_body'] = len(checked_realms) > 0 and \
                               macros.expand_macro(formatter, macro, args) \
                               or ''
        except InvalidQuery, e:
            # Show the error and fall back to a plain tag cloud.
            data['tag_query_error'] = to_unicode(e)
            data['tag_body'] = macros.expand_macro(formatter, 'TagCloud', '')
        # NOTE(review): no return statement is visible in this chunk -- the
        # (template, data, content_type) return presumably follows in the
        # original module; verify against upstream.
0
    def _parse_heading(self, formatter, match, fullmatch):
        shorten = False
        match = match.strip()

        depth = min(len(fullmatch.group('nhdepth')), 6)

        try:
          formatter.close_table()
          formatter.close_paragraph()
          formatter.close_indentation()
          formatter.close_list()
          formatter.close_def_list()
        except:
          pass

        ## BEGIN of code provided by Joshua Hoke, see th:#4521.
        # moved and modified by Martin

        # Figure out headline numbering for outline
        counters = self.outline_counters.get(formatter, [])

        if formatter not in self.outline_counters:
            self.outline_counters[formatter] = counters

        if len(counters) < depth:
            delta = depth - len(counters)
            counters.extend([0] * (delta - 1))
            counters.append(1)
        else:
            del counters[depth:]
            counters[-1] += 1
        ## END

        num    = fullmatch.group('nheadnum') or ''
        anchor = fullmatch.group('nhanchor') or ''
        heading_text = match[depth+1+len(num):-depth-1-len(anchor)].strip()

        num = num.strip()
        if num and num[-1] == '.':
          num = num[:-1]
        if num:
          numbers = [self._int(n) for n in num.split('.')]
          if len(numbers) == 1:
            counters[depth-1] = numbers[0]
          else:
            if len(numbers) > depth:
              del numbers[depth:]
            n = 0
            while numbers[n] == -1:
              n = n + 1
            counters[depth-len(numbers[n:]):] = numbers[n:]

        if not heading_text:
          return tag()

        heading = format_to_oneliner(formatter.env, formatter.context, 
            heading_text, False)

        if anchor:
            anchor = anchor[1:]
        else:
            sans_markup = plaintext(heading, keeplinebreaks=False)
            anchor = WikiParser._anchor_re.sub('', sans_markup)
            if not anchor or anchor[0].isdigit() or anchor[0] in '.-':
                # an ID must start with a Name-start character in XHTML
                anchor = 'a' + anchor # keeping 'a' for backward compat
        i = 1
        anchor_base = anchor
        while anchor in formatter._anchors:
            anchor = anchor_base + str(i)
            i += 1
        formatter._anchors[anchor] = True

        # Add number directly if CSS is not used
        s = self.startatleveltwo and 1 or 0
        #self.env.log.debug('NHL:' + str(counters))
        while s < len(counters) and counters[s] == 0:
          s = s + 1

        oheading_text = heading_text
        heading_text = '.'.join(map(str, counters[s:]) + [" "]) + heading_text

        if self.number_outline:
          oheading_text = heading_text

        heading = format_to_oneliner(formatter.env, formatter.context, 
            heading_text, False)
        oheading = format_to_oneliner(formatter.env, formatter.context, 
            oheading_text, False)

        ## BEGIN of code provided by Joshua Hoke, see th:#4521.
        # modified by Martin

        # Strip out link tags
        oheading = re.sub(r'</?a(?: .*?)?>', '', oheading)

        try:
            # Add heading to outline
            formatter.outline.append((depth, anchor, oheading))
        except AttributeError:
            # Probably a type of formatter that doesn't build an
            # outline.
            pass
        ## END of provided code

        html = tag.__getattr__('h' + str(depth))(
            heading, id = anchor)
        if self.fix_paragraph:
          return '</p>' + to_unicode(html) + '<p>'
        else:
          return html
Exemple #20
0
    def process_request(self, req):
        """Render the tag cloud / tag query page (TagSystem-based variant).

        Complex tag ids and single-tag queries are redirected to their
        canonical URLs; otherwise either the ListTagged or the TagCloud
        macro is expanded into the page body.
        """
        req.perm.require('TAGS_VIEW')

        match = re.match(r'/tags/?(.*)', req.path_info)
        tag_id = match.group(1) and match.group(1) or None
        query = req.args.get('q', '')

        # Consider only providers, that are permitted for display.
        tag_system = TagSystem(self.env)
        all_realms = tag_system.get_taggable_realms(req.perm)
        # Without a tag/query or explicit realm selection, pre-check all
        # realms except the configured exclusions.
        if not (tag_id or query) or [r for r in all_realms
                                     if r in req.args] == []:
            for realm in all_realms:
                if not realm in self.exclude_realms:
                    req.args[realm] = 'on'
        checked_realms = [r for r in all_realms if r in req.args]
        if query:
            # Add permitted realms from query expression.
            checked_realms.extend(query_realms(query, all_realms))
        realm_args = dict(zip([r for r in checked_realms],
                              ['on' for r in checked_realms]))
        # Switch between single tag and tag query expression mode.
        if tag_id and not re.match(r"""(['"]?)(\S+)\1$""", tag_id, re.UNICODE):
            # Convert complex, invalid tag ID's --> query expression.
            req.redirect(req.href.tags(realm_args, q=tag_id))
        elif query:
            single_page = re.match(r"""(['"]?)(\S+)\1$""", query, re.UNICODE)
            if single_page:
                # Convert simple query --> single tag.
                req.redirect(req.href.tags(single_page.group(2), realm_args))

        data = dict(page_title=_("Tags"), checked_realms=checked_realms)
        # Populate the TagsQuery form field.
        data['tag_query'] = tag_id and tag_id or query
        data['tag_realms'] = list(dict(name=realm,
                                       checked=realm in checked_realms)
                                  for realm in all_realms)
        if tag_id:
            data['tag_page'] = WikiPage(self.env,
                                        tag_system.wiki_page_prefix + tag_id)
        if query or tag_id:
            macro = 'ListTagged'
            # TRANSLATOR: The meta-nav link label.
            add_ctxtnav(req, _("Back to Cloud"), req.href.tags())
            args = "%s,format=%s,cols=%s" % \
                   (tag_id and tag_id or query, self.default_format,
                    self.default_cols)
            data['mincount'] = None
        else:
            macro = 'TagCloud'
            mincount = as_int(req.args.get('mincount', None),
                              self.cloud_mincount)
            args = mincount and "mincount=%s" % mincount or None
            data['mincount'] = mincount
        formatter = Formatter(self.env, Context.from_request(req,
                                                             Resource('tag')))
        self.env.log.debug("%s macro arguments: %s", macro,
                           args and args or '(none)')
        macros = TagWikiMacros(self.env)
        try:
            # Query string without realm throws 'NotImplementedError'.
            data['tag_body'] = checked_realms and \
                               macros.expand_macro(formatter, macro, args,
                                                   realms=checked_realms) \
                               or ''
        except InvalidQuery, e:
            # Show the error and fall back to a plain tag cloud.
            data['tag_query_error'] = to_unicode(e)
            data['tag_body'] = macros.expand_macro(formatter, 'TagCloud', '')
        # NOTE(review): no return statement is visible in this chunk -- the
        # (template, data, content_type) return presumably follows in the
        # original module; verify against upstream.
# Example #21
    def process_request(self, req):
        """Render the /tags listing page.

        Handles both the single-tag view and the tag-query view: it
        normalizes the URL (redirecting between the single-tag and
        query-expression forms), then expands either the ListTagged
        macro (tag/query given) or the TagCloud macro into
        ``data['tag_body']`` for the template.
        """
        req.perm.require("TAGS_VIEW")

        # Anything after '/tags/' is treated as a single tag id.
        match = re.match(r"/tags/?(.*)", req.path_info)
        tag_id = match.group(1) and match.group(1) or None
        query = req.args.get("q", "")

        # Only realms whose provider grants 'view' to this user.
        realms = [
            p.get_taggable_realm()
            for p in self.tag_providers
            if (not hasattr(p, "check_permission") or p.check_permission(req.perm, "view"))
        ]
        # With no tag/query given, or no realm checkbox set, pre-check
        # every realm that is not explicitly excluded.
        if not (tag_id or query) or [r for r in realms if r in req.args] == []:
            for realm in realms:
                if not realm in self.exclude_realms:
                    req.args[realm] = "on"
        checked_realms = [r for r in realms if r in req.args]
        realm_args = dict(zip([r for r in checked_realms], ["on" for r in checked_realms]))
        # Switch between single-tag and query-expression URL forms.
        if tag_id and not re.match(r"""(['"]?)(\S+)\1$""", tag_id, re.UNICODE):
            # Convert complex, invalid tag ID's to query expression.
            req.redirect(req.href.tags(realm_args, q=tag_id))
        elif query:
            single_page = re.match(r"""(['"]?)(\S+)\1$""", query, re.UNICODE)
            if single_page:
                # Convert simple query for single tag ID.
                req.redirect(req.href.tags(single_page.group(2), realm_args))

        data = dict(page_title=_("Tags"), checked_realms=checked_realms)
        # Populate the TagsQuery form field.
        data["tag_query"] = tag_id and tag_id or query
        data["tag_realms"] = list(dict(name=realm, checked=realm in checked_realms) for realm in realms)
        if tag_id:
            # Single-tag mode: expose the wiki page of the same name.
            page_name = tag_id
            page = WikiPage(self.env, page_name)
            data["tag_page"] = page

        macros = TagWikiMacros(self.env)
        if query or tag_id:
            # TRANSLATOR: The meta-nav link label.
            add_ctxtnav(req, _("Back to Cloud"), req.href.tags())
            macro = "ListTagged"
            args = "%s,format=%s,cols=%s,realm=%s" % (
                tag_id and tag_id or query,
                self.default_format,
                self.default_cols,
                "|".join(checked_realms),
            )
            data["mincount"] = None
        else:
            macro = "TagCloud"
            mincount = as_int(req.args.get("mincount", None), self.cloud_mincount)
            args = "mincount=%s,realm=%s" % (mincount, "|".join(checked_realms))
            data["mincount"] = mincount
        formatter = Formatter(self.env, Context.from_request(req, Resource("tag")))
        self.env.log.debug("Tag macro arguments: %s", args)
        try:
            # Query string without realm throws 'NotImplementedError'.
            data["tag_body"] = len(checked_realms) > 0 and macros.expand_macro(formatter, macro, args) or ""
        except InvalidQuery, e:
            # Show the error and fall back to the plain cloud view.
            data["tag_query_error"] = to_unicode(e)
            data["tag_body"] = macros.expand_macro(formatter, "TagCloud", "")
# Example #22
    def process_request(self, req):
        """Render or serve the ticket dependency graph (/depgraph).

        Depending on the URL, the graph covers the whole project, one
        milestone, or one ticket.  When the URL ends in
        ``depgraph.<format>`` (or a ``format`` request argument is
        given) the rendered graph itself is sent (text/svg/debug dump or
        a PNG image); otherwise the interactive HTML page is returned.
        """
        # Strip the leading '/depgraph/' prefix from the path.
        path_info = req.path_info[10:]

        img_format = req.args.get('format')
        m = self.IMAGE_RE.search(path_info)
        is_img = m is not None
        if is_img:
            img_format = m.group(1)
            # Drop the trailing '/depgraph.<format>' part of the path.
            path_info = path_info[:-(10+len(img_format))]

        # An empty remaining path means "whole project".
        is_full_graph = not path_info
        with_clusters   = req.args.getbool('with_clusters', False)

        cur_pid = self.pm.get_current_project(req)

        #list of tickets to generate the depgraph for
        tkt_ids=[]

        if is_full_graph:
            # depgraph for full project
            # cluster by milestone
            self.pm.check_component_enabled(self, pid=cur_pid)
            db = self.env.get_read_db()
            cursor = db.cursor()
            if with_clusters:
                q = '''
                    SELECT milestone, id
                    FROM ticket
                    WHERE project_id=%s
                    ORDER BY milestone, id
                '''
            else:
                q = '''
                    SELECT id
                    FROM ticket
                    WHERE project_id=%s
                    ORDER BY id
                '''
            cursor.execute(q, (cur_pid,))
            rows = cursor.fetchall()
            if with_clusters:
                # Keep (milestone, id) pairs so the graph can be clustered.
                tkt_ids = rows
            else:
                tkt_ids = [r[0] for r in rows]
        else:
            # degraph for resource
            resource = get_real_resource_from_url(self.env, path_info, req.args)

            # project check
            res_pid = resource.pid
            self.pm.check_component_enabled(self, pid=res_pid)
            if res_pid != cur_pid:
                self.pm.redirect_to_project(req, res_pid)

            is_milestone = isinstance(resource, Milestone)
            #Urls to generate the depgraph for a ticket is /depgraph/ticketnum
            #Urls to generate the depgraph for a milestone is /depgraph/milestone/milestone_name
            if is_milestone:
                #we need to query the list of tickets in the milestone
                milestone = resource
                query=Query(self.env, constraints={'milestone' : [milestone.name]}, max=0, project=milestone.pid)
                tkt_ids=[fields['id'] for fields in query.execute()]
            else:
                #the list is a single ticket
                ticket = resource
                tkt_ids = [ticket.id]

        #the summary argument defines whether we place the ticket id or
        #it's summary in the node's label
        label_summary=0
        if 'summary' in req.args:
            label_summary=int(req.args.get('summary'))

        # Clustering only makes sense for the full-project graph.
        clustering = is_full_graph and with_clusters
        g = self._build_graph(req, tkt_ids, label_summary=label_summary, with_clusters=clustering)
        if is_img or img_format:
            if img_format == 'text':
                #in case g.__str__ returns unicode, we need to convert it in ascii
                req.send(to_unicode(g).encode('ascii', 'replace'), 'text/plain')
            elif img_format == 'debug':
                import pprint
                req.send(
                    pprint.pformat(
                        [TicketLinks(self.env, tkt_id) for tkt_id in tkt_ids]
                        ),
                    'text/plain')
            elif img_format == 'svg':
                req.send(g.render(self.dot_path, img_format), 'image/svg+xml')
            elif img_format is not None:
                req.send(g.render(self.dot_path, img_format), 'text/plain')

            # Fallthrough: render a PNG, optionally post-processing the
            # graphviz PostScript output through ghostscript.
            if self.use_gs:
                ps = g.render(self.dot_path, 'ps2')
                gs = subprocess.Popen([self.gs_path, '-q', '-dTextAlphaBits=4', '-dGraphicsAlphaBits=4', '-sDEVICE=png16m', '-sOutputFile=%stdout%', '-'],
                                      stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                img, err = gs.communicate(ps)
                if err:
                    self.log.debug('MasterTickets: Error from gs: %s', err)
            else:
                img = g.render(self.dot_path)
            req.send(img, 'image/png')
        else:
            # HTML page: hand the graph and render helpers to the template.
            data = {
                'graph': g,
                'graph_render': partial(g.render, self.dot_path),
                'use_gs': self.use_gs,
                'full_graph': is_full_graph,
                'img_format': self.default_format,
                'summary': label_summary,
                'with_clusters': with_clusters,
            }

            if is_full_graph:
                rsc_url = None
            else:
                if is_milestone:
                    resource = milestone.resource
                    add_ctxtnav(req, _('Back to Milestone %(name)s', name=milestone.name),
                                get_resource_url(self.env, resource, req.href))
                    data['milestone'] = milestone.name
                else: # ticket
                    data['tkt'] = ticket
                    resource = ticket.resource
                    add_ctxtnav(req, _('Back to Ticket #%(id)s', id=ticket.id),
                                get_resource_url(self.env, resource, req.href))
                rsc_url = get_resource_url(self.env, resource)

            data['img_url'] = req.href.depgraph(rsc_url, 'depgraph.%s' % self.default_format,
                                                summary=g.label_summary, with_clusters=int(with_clusters))

            return 'depgraph.html', data, None
    def expand_macro(self, formatter, name, content):
        """Render a wiki list of tagged releases for a repository path.

        ``content`` carries the macro arguments: an optional repository
        path and entry limit (positional) plus ``limit=`` and
        ``package=`` keywords.  Builds wiki markup linking each release
        tag to its log, changeset diffs and (when the wiki page exists)
        its release-notes page, and returns the markup rendered as HTML.
        """
        req = formatter.req
        args, kwargs = parse_args(content)
        # Pad the positional list so the unpacking below always succeeds.
        args += [None, None]
        path, limit = args[:2]
        limit = kwargs.pop('limit', limit)
        package = kwargs.pop('package', None)

        if 'CHANGESET_VIEW' not in req.perm:
            return Markup('<i>Releases not available</i>')

        rm = RepositoryManager(self.env)
        reponame, repo, path = rm.get_repository_by_path(path);

        rev = repo.get_youngest_rev()
        rev = repo.normalize_rev(rev)
        path = repo.normalize_path(path)
        if limit is None:
            limit = 20
        else:
            limit = int(limit)

        releases = self.get_releases(repo, path, rev)

        # limit the releases after they have been sorted
        releases = releases[:1 + limit]
        items = []
        # None sentinels so every entry has a well-defined prev/next.
        releases = [None] + releases + [None]

        # some extra checks to avoid using double-slashes
        if reponame == '':
            if path == '/':
                path = ''
            else:
                path = '/' + path
        elif path == '/':
            path = '/' + reponame.rstrip('/')
        else:
            path = '/' + reponame.rstrip('/') + '/' + path.lstrip('/')

        if not package:
            package = path.split("/")[-1]

        # Slide a (prev, cur, next) window over the sentinel-padded list.
        for i in xrange(len(releases) - 2):
            prev, cur, next = releases[i : i + 3]

            if prev == None and next == None:
                # no releases yet, just show trunk
                items.append(
                    " * "
                    " [/browser%(path)s/trunk trunk]"
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " ("
                    "[/log%(path)s/trunk changes]"
                    " [/changeset?new_path=%(path)s/trunk diffs]"
                    ")"
                % {
                    'reponame' : reponame,
                    'path': path,
                    'rev': cur['rev'],
                })
            elif prev == None:
                # first entry = trunk
                items.append(
                    " * "
                    " [/browser%(path)s/trunk trunk]"
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " ("
                    "[/log%(path)s/trunk?revs=%(stop_rev)s-%(rev)s changes]"
                    " [/changeset?old_path=%(path)s/tags/%(old_tag)s&new_path=%(path)s/trunk diffs]"
                    ")"
                % {
                    'reponame' : reponame,
                    'path': path,
                    'rev' : cur['rev'],
                    'old_tag' : next['version'],
                    'stop_rev' : next['rev'],
                })
            elif next != None:
                # regular releases
                release_page = 'release/%s-%s' % (package, cur['version'])
                page = WikiPage(self.env, release_page)
                if page.exists:
                    release_link = " [wiki:%s release notes]" % (release_page)
                else:
                    release_link = ""

                items.append(
                    " * '''%(date)s'''"
                    " [/log%(path)s/tags/%(new_tag)s %(new_tag)s] "
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " by %(author)s"
                    " ("
                    "[/log%(path)s/trunk?revs=%(stop_rev)s-%(rev)s changes]"
                    " [/changeset?old_path=%(path)s/tags/%(old_tag)s&new_path=%(path)s/tags/%(new_tag)s diffs]"
                    "%(release_link)s"
                    ")"
                % {
                    'reponame' : reponame,
                    'path': path,
                    'date': cur['time'].strftime('%Y-%m-%d'),
                    'rev' : cur['rev'],
                    'stop_rev' : next['rev'],
                    'old_tag' : next['version'],
                    'new_tag' : cur['version'],
                    'author': cur['author'],
                    'release_link' : release_link,

                })
                url = self.specurl_annotate(cur);
                if url != None:
                    annotate = " spec: [%s annotate]" % url
                    items.append(annotate)
                    # check also diff link
                    url = self.specurl_diff(cur, next);
                    if url != None:
                        annotate = " [%s diff]" % url
                        items.append(annotate)
            else:
                # last release
                items.append(
                    " * '''%(date)s'''"
                    " [/log%(path)s/tags/%(new_tag)s?rev=%(rev)s&mode=follow_copy %(new_tag)s]"
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " by %(author)s"
                % {
                    'reponame' : reponame,
                    'path': path,
                    'date': cur['time'].strftime('%Y-%m-%d'),
                    'rev' : cur['rev'],
                    'new_tag' : cur['version'],
                    'author': cur['author'],
                })

        return '<div class="releases">\n' + to_unicode(wiki_to_html("\n".join(items), self.env, req))  + '</div>\n'
# Example #24
    def process_request(self, req):
        req.perm.require('STATS_VIEW')

        author = req.args.get('author', '')
        path = req.args.get('path', '')
        last = req.args.get('last', '')

        where = []
        if author:
            where.append("author = '%s'" % author.replace("'", "''"))
        since = 0
        if last:
            m = re.match('(\d+)m', last)
            w = re.match('(\d+)w', last)
            d = re.match('(\d+)d', last)
            if m is not None:
                now = time.time()
                months, = m.groups()
                ago = (24 * 60 * 60 * 30 * int(months))
                since = now - ago
                where.append('%s > %s' % (SECONDS, since))
            elif w is not None:
                now = time.time()
                weeks, = w.groups()
                ago = (24 * 60 * 60 * 7 * int(weeks))
                since = now - ago
                where.append('%s > %s' % (SECONDS, since))
            elif d is not None:
                now = time.time()
                days, = d.groups()
                ago = (24 * 60 * 60 * int(days))
                since = now - ago
                where.append('%s > %s' % (SECONDS, since))
        if where:
            where = 'where ' + ' and '.join(where)
        else:
            where = ''

        data = {}
        data['author'] = author
        data['last_1m'] = req.href.stats(path, last='1m', author=author)
        data['last_12m'] = req.href.stats(path, last='12m', author=author)
        data['last_all'] = req.href.stats(path, author=author)

        db_str = self.env.config.get('trac', 'database')
        db_type, db_path = db_str.split(':', 1)
        assert db_type in ('sqlite', 'mysql', 'postgres'), \
            'Unsupported database "%s"' % db_type
        self.db_type = db_type

        # Include trac wiki stylesheet
        add_stylesheet(req, 'common/css/wiki.css')

        # Include trac stats stylesheet
        add_stylesheet(req, 'stats/common.css')

        # Include javascript libraries
        add_script(req, 'stats/jquery.flot.min.js')
        add_script(req, 'stats/jquery.flot.time.min.js')
        add_script(req, 'stats/jquery.tablesorter.min.js')
        add_script(req, 'stats/jquery.sparkline.min.js')
        add_script(req, 'stats/excanvas.compiled.js')

        # Include context navigation links
        add_ctxtnav(req, 'Summary', req.href.stats())
        add_ctxtnav(req, 'Code', req.href.stats('code'))
        add_ctxtnav(req, 'Wiki', req.href.stats('wiki'))
        add_ctxtnav(req, 'Tickets', req.href.stats('tickets'))

        if hasattr(self.env, 'db_query'):
            db_query = self.env.db_query
        else:
            db_query = old_db_query(self.env)
        with db_query as db:
            cursor = db.cursor()

            if path == '/':
                data['title'] = 'Stats'
                result = self._process(req, cursor, where, data)

            elif path == '/code':
                data['title'] = 'Code' + (author and (' (%s)' % author))
                result = self._process_code(req, cursor, where, data)

            elif path == '/wiki':
                data['title'] = 'Wiki ' + (author and (' (%s)' % author))
                result = self._process_wiki(req, cursor, where, since, data)

            elif path == '/tickets':
                data['title'] = 'Tickets' + (author and (' (%s)' % author))
                result = self._process_tickets(req, cursor, where, since, data)

            else:
                raise ValueError, "unknown path '%s'" % path

        # Clean the unicode values for Genshi
        template_name, data, content_type = result
        new_data = {}
        for k, v in data.iteritems():
            if isinstance(v, str):
                new_data[k] = to_unicode(v)
            else:
                new_data[k] = v
        return template_name, new_data, content_type
# Example #25
    def restore(self, backup_id, user_id):
        """
        Restores the database dump over the existing setup

        :param backup_id: Backup identifier, as an integer
        :param user_id: Id the user who did the restore

        Returns:
            A dictionary of the restored backup
        """
        backup = {}

        assert isinstance(user_id, long), 'User id needs to be long integer'
        assert isinstance(backup_id,
                          long), 'Backup id needs to be long integer'

        # Create dictionary containing the info about the backup
        backup = {
            'id': backup_id,
            'restored': datetime.utcnow(),
            'restored_by': user_id
        }

        # Open the db connection for adding the restore information, if any of the operations fail,
        # the database transaction will be rolled back in the context manager
        with admin_transaction() as cursor:
            # Update restore into to project_backup table. Use result count to check if the id was
            # actually found or not
            query = '''
                UPDATE project_backup
                SET restored=%s, restored_by=%s
                WHERE id = %s
            '''
            cursor.execute(
                query,
                (backup['restored'], backup['restored_by'], backup['id']))

            # Check if the backup_id was actually found?
            if not cursor.rowcount:
                raise TracError('Backup cannot be found')

            # Do the actual database restore
            try:
                mysqlp = self._get_mysql_process(self.env)
            except OSError, e:
                raise TracError(
                    _("Unable to run mysql command: %(msg)s",
                      msg=exception_to_unicode(e)))

            # Pass the backup into stdin
            backup_path = self.backup_path_tmpl % (self.project.env_name,
                                                   backup['id'])

            if not os.path.exists(backup_path):
                conf.log.error('User failed to restore project backup')
                raise TracError(_('Backup file cannot be found'))

            with open(backup_path, 'r+b') as backup_input:
                errmsg = mysqlp.communicate(input=backup_input.read())

            if mysqlp.returncode != 0:
                msg = _('Restoring the database backup failed: %(msg)s',
                        msg=to_unicode(errmsg.strip()))
                conf.log.error(msg)
                raise TracError(msg)
# Example #26
    def process_request(self, req):
        req.perm.require('STATS_VIEW')

        author = req.args.get('author', '')
        path = req.args.get('path', '')
        last = req.args.get('last', '')

        where = []
        if author:
            where.append("author = '%s'" % author)
        if last:
            m = re.match('(\d+)m', last)
            if m is not None:
                now = time.time()
                months, = m.groups()
                ago = (24 * 60 * 60 * 30 * int(months))
                where.append('%s > %s' % (SECONDS, now - ago))
        if where:
            where = 'where ' + ' and '.join(where)
        else:
            where = ''

        data = {}
        data['author'] = author
        data['last_1m'] = req.href.stats(path, last='1m', author=author)
        data['last_12m'] = req.href.stats(path, last='12m', author=author)
        data['last_all'] = req.href.stats(path, author=author)

        db = self.env.get_db_cnx()
        cursor = db.cursor()

        db_str = self.env.config.get('trac', 'database')
        db_type, db_path = db_str.split(':', 1)
        assert db_type in ('sqlite', 'mysql', 'postgres'), \
            'Unsupported database "%s"' % db_type
        self.db_type = db_type

        # Include trac wiki stylesheet
        add_stylesheet(req, 'common/css/wiki.css')

        # Include trac stats stylesheet
        add_stylesheet(req, 'stats/common.css')

        # Include javascript libraries
        add_script(req, 'stats/jquery-1.4.3.min.js')
        add_script(req, 'stats/jquery.flot.min.js')
        add_script(req, 'stats/jquery.tablesorter.min.js')
        add_script(req, 'stats/jquery.sparkline.min.js')
        add_script(req, 'stats/excanvas.pack.js')

        # Include context navigation links
        add_ctxtnav(req, 'Summary', req.href.stats())
        add_ctxtnav(req, 'Code', req.href.stats('code'))
        add_ctxtnav(req, 'Wiki', req.href.stats('wiki'))
        add_ctxtnav(req, 'Tickets', req.href.stats('tickets'))

        if path == '/':
            data['title'] = 'Stats'
            result = self._process(req, cursor, where, data)
            cursor.close()

        elif path == '/code':
            data['title'] = 'Code' + (author and (' (%s)' % author))
            result = self._process_code(req, cursor, where, data)
            cursor.close()

        elif path == '/wiki':
            data['title'] = 'Wiki ' + (author and (' (%s)' % author))
            result = self._process_wiki(req, cursor, where, data)
            cursor.close()

        elif path == '/tickets':
            data['title'] = 'Tickets' + (author and (' (%s)' % author))
            result = self._process_tickets(req, cursor, where, data)
            cursor.close()

        else:
            cursor.close()
            raise ValueError, "unknown path '%s'" % path

        # Clean the unicode values for Genshi
        template_name, data, content_type = result
        new_data = {}
        for k, v in data.iteritems():
            if isinstance(v, str):
                new_data[k] = to_unicode(v)
            else:
                new_data[k] = v
        return template_name, new_data, content_type
    def expand_macro(self, formatter, name, content):
        """Render a wiki list of tagged releases for a repository path.

        ``content`` carries the macro arguments: an optional repository
        path and entry limit (positional) plus ``limit=`` and
        ``package=`` keywords.  Builds wiki markup linking each release
        tag to its log, changeset diffs and (when the wiki page exists)
        its release-notes page, and returns the markup rendered as HTML.
        """
        req = formatter.req
        args, kwargs = parse_args(content)
        # Pad the positional list so the unpacking below always succeeds.
        args += [None, None]
        path, limit = args[:2]
        limit = kwargs.pop('limit', limit)
        package = kwargs.pop('package', None)

        if 'CHANGESET_VIEW' not in req.perm:
            return Markup('<i>Releases not available</i>')

        rm = RepositoryManager(self.env)
        reponame, repo, path = rm.get_repository_by_path(path)

        rev = repo.get_youngest_rev()
        rev = repo.normalize_rev(rev)
        path = repo.normalize_path(path)
        if limit is None:
            limit = 20
        else:
            limit = int(limit)

        releases = self.get_releases(repo, path, rev)

        # limit the releases after they have been sorted
        releases = releases[:1 + limit]
        items = []
        # None sentinels so every entry has a well-defined prev/next.
        releases = [None] + releases + [None]

        # some extra checks to avoid using double-slashes
        if reponame == '':
            if path == '/':
                path = ''
            else:
                path = '/' + path
        elif path == '/':
            path = '/' + reponame.rstrip('/')
        else:
            path = '/' + reponame.rstrip('/') + '/' + path.lstrip('/')

        if not package:
            package = path.split("/")[-1]

        # Slide a (prev, cur, next) window over the sentinel-padded list.
        for i in xrange(len(releases) - 2):
            prev, cur, next = releases[i:i + 3]

            if prev == None and next == None:
                # no releases yet, just show trunk
                items.append(" * "
                             " [/browser%(path)s/trunk trunk]"
                             " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                             " ("
                             "[/log%(path)s/trunk changes]"
                             " [/changeset?new_path=%(path)s/trunk diffs]"
                             ")" % {
                                 'reponame': reponame,
                                 'path': path,
                                 'rev': cur['rev'],
                             })
            elif prev == None:
                # first entry = trunk
                items.append(
                    " * "
                    " [/browser%(path)s/trunk trunk]"
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " ("
                    "[/log%(path)s/trunk?revs=%(stop_rev)s-%(rev)s changes]"
                    " [/changeset?old_path=%(path)s/tags/%(old_tag)s&new_path=%(path)s/trunk diffs]"
                    ")" % {
                        'reponame': reponame,
                        'path': path,
                        'rev': cur['rev'],
                        'old_tag': next['version'],
                        'stop_rev': next['rev'],
                    })
            elif next != None:
                # regular releases
                release_page = 'release/%s-%s' % (package, cur['version'])
                page = WikiPage(self.env, release_page)
                if page.exists:
                    release_link = " [wiki:%s release notes]" % (release_page)
                else:
                    release_link = ""

                items.append(
                    " * '''%(date)s'''"
                    " [/log%(path)s/tags/%(new_tag)s %(new_tag)s] "
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " by %(author)s"
                    " ("
                    "[/log%(path)s/trunk?revs=%(stop_rev)s-%(rev)s changes]"
                    " [/changeset?old_path=%(path)s/tags/%(old_tag)s&new_path=%(path)s/tags/%(new_tag)s diffs]"
                    "%(release_link)s"
                    ")" % {
                        'reponame': reponame,
                        'path': path,
                        'date': cur['time'].strftime('%Y-%m-%d'),
                        'rev': cur['rev'],
                        'stop_rev': next['rev'],
                        'old_tag': next['version'],
                        'new_tag': cur['version'],
                        'author': cur['author'],
                        'release_link': release_link,
                    })
                url = self.specurl_annotate(cur)
                if url != None:
                    annotate = " spec: [%s annotate]" % url
                    items.append(annotate)
                    # check also diff link
                    url = self.specurl_diff(cur, next)
                    if url != None:
                        annotate = " [%s diff]" % url
                        items.append(annotate)
            else:
                # last release
                items.append(
                    " * '''%(date)s'''"
                    " [/log%(path)s/tags/%(new_tag)s?rev=%(rev)s&mode=follow_copy %(new_tag)s]"
                    " @[changeset:%(rev)s/%(reponame)s %(rev)s]"
                    " by %(author)s" % {
                        'reponame': reponame,
                        'path': path,
                        'date': cur['time'].strftime('%Y-%m-%d'),
                        'rev': cur['rev'],
                        'new_tag': cur['version'],
                        'author': cur['author'],
                    })

        return '<div class="releases">\n' + to_unicode(
            wiki_to_html("\n".join(items), self.env, req)) + '</div>\n'
# Example #28
    def process_request(self, req):
        req.perm.require('TAGS_VIEW')

        match = re.match(r'/tags/?(.*)', req.path_info)
        tag_id = match.group(1) and match.group(1) or None
        query = req.args.get('q', '')

        # Consider only providers, that are permitted for display.
        tag_system = TagSystem(self.env)
        all_realms = tag_system.get_taggable_realms(req.perm)
        if not (tag_id or query) or [r for r in all_realms if r in req.args
                                     ] == []:
            for realm in all_realms:
                if not realm in self.exclude_realms:
                    req.args[realm] = 'on'
        checked_realms = [r for r in all_realms if r in req.args]
        if query:
            # Add permitted realms from query expression.
            checked_realms.extend(query_realms(query, all_realms))
        realm_args = dict(
            zip([r for r in checked_realms], ['on' for r in checked_realms]))
        # Switch between single tag and tag query expression mode.
        if tag_id and not re.match(r"""(['"]?)(\S+)\1$""", tag_id, re.UNICODE):
            # Convert complex, invalid tag ID's --> query expression.
            req.redirect(req.href.tags(realm_args, q=tag_id))
        elif query:
            single_page = re.match(r"""(['"]?)(\S+)\1$""", query, re.UNICODE)
            if single_page:
                # Convert simple query --> single tag.
                req.redirect(req.href.tags(single_page.group(2), realm_args))

        data = dict(page_title=_("Tags"), checked_realms=checked_realms)
        # Populate the TagsQuery form field.
        data['tag_query'] = tag_id and tag_id or query
        data['tag_realms'] = list(
            dict(name=realm, checked=realm in checked_realms)
            for realm in all_realms)
        if tag_id:
            data['tag_page'] = WikiPage(self.env,
                                        tag_system.wiki_page_prefix + tag_id)
        if query or tag_id:
            macro = 'ListTagged'
            # TRANSLATOR: The meta-nav link label.
            add_ctxtnav(req, _("Back to Cloud"), req.href.tags())
            args = "%s,format=%s,cols=%s" % \
                   (tag_id and tag_id or query, self.default_format,
                    self.default_cols)
            data['mincount'] = None
        else:
            macro = 'TagCloud'
            mincount = as_int(req.args.get('mincount', None),
                              self.cloud_mincount)
            args = mincount and "mincount=%s" % mincount or None
            data['mincount'] = mincount
        formatter = Formatter(self.env,
                              Context.from_request(req, Resource('tag')))
        self.env.log.debug("%s macro arguments: %s" %
                           (macro, args and args or '(none)'))
        macros = TagWikiMacros(self.env)
        try:
            # Query string without realm throws 'NotImplementedError'.
            data['tag_body'] = checked_realms and \
                               macros.expand_macro(formatter, macro, args,
                                                   realms=checked_realms) \
                               or ''
        except InvalidQuery, e:
            data['tag_query_error'] = to_unicode(e)
            data['tag_body'] = macros.expand_macro(formatter, 'TagCloud', '')
Exemple #29
0
    def expand_macro(self, formatter, name, text, args=None):
        """Collect recent timeline events from all visible projects and
        cache them on the Trac environment.

        :param formatter: wiki formatter; provides the current request
        :param name: macro name (unused in this excerpt)
        :param text: macro argument; must be a string of digits
        :param args: extra macro arguments (unused in this excerpt)

        NOTE(review): `out` is initialised but never returned in this
        excerpt -- the snippet appears truncated after the cache refresh.
        """

        assert text.isdigit(), "Argument must be a number"

        out = "<dl class='lastevents'>"
        add_stylesheet(formatter.req, 'tracprojectmanager/css/lastevents.css')

        # Reuse the environment-level cache when it has not expired yet.
        # cached_lastevents is [events_list, timestamp_of_last_refresh].
        all_events = []
        if hasattr(self.env, 'cached_lastevents'):
            expiration = self.env.cached_lastevents[1] + timedelta(
                seconds=EVENT_CACHE_INTERVAL)
            if datetime.now() < expiration:
                all_events = self.env.cached_lastevents[0]

        if not all_events:
            # Cache miss or expired: gather events from the last
            # EVENT_MAX_DAYS days across every project in the listing.
            stop = datetime.now(formatter.req.tz)
            start = stop - timedelta(days=EVENT_MAX_DAYS)

            projects = get_project_list(self.env, formatter.req)
            user = formatter.req.authname  # NOTE(review): unused below

            for project, project_path, project_url, env in projects:
                env_timeline = TimelineModule(env)
                for provider in env_timeline.event_providers:
                    # Enable every filter the provider advertises.
                    filters = [
                        x[0]
                        for x in provider.get_timeline_filters(formatter.req)
                    ]
                    self.env.log.info("Project %s - Filters: %s", project,
                                      filters)
                    try:
                        events = provider.get_timeline_events(
                            formatter.req, start, stop, filters)
                        #self.env.log.info("Event count: %d", len([x for x in events]))

                        for event in events:
                            # Context rooted at the event's own project URL
                            # so rendered links point into that project.
                            context = Context(formatter.resource,
                                              Href(project_url),
                                              formatter.req.perm)
                            context.req = formatter.req
                            #context = Context.from_request(formatter.req)

                            if len(event) == 6:  # 0.10 events
                                kind, url, title, date, author, desc = event
                            else:  # 0.11 events
                                if len(event) == 5:  # with special provider
                                    # NOTE(review): this rebinds 'provider',
                                    # which also affects later iterations of
                                    # the enclosing loop -- confirm intended.
                                    kind, date, author, data, provider = event
                                else:
                                    kind, date, author, data = event

                                # 0.11-style providers render title/url/desc
                                # on demand rather than shipping them in the
                                # event tuple.
                                title = to_unicode(
                                    provider.render_timeline_event(
                                        context, 'title', event))
                                url = provider.render_timeline_event(
                                    context, 'url', event)
                                desc = to_unicode(
                                    provider.render_timeline_event(
                                        context, 'description', event))

                            all_events.append((project, kind, date, title, url,
                                               author, desc))
                    except Exception, ex:
                        # Best effort: a failing provider is logged and
                        # skipped so the other projects still render.
                        #import sys
                        self.env.log.warning("Exception: %s" %
                                             traceback.format_exc())
                        #out = out + "%s<br/>" % traceback.format_exc()

            # Sort newest first (index 2 is the event date), then refresh
            # the cache.
            # NOTE(review): the cache timestamp is naive datetime.now()
            # while 'stop' above is tz-aware -- confirm this mix is intended.
            all_events.sort(cmp=lambda x, y: x[2] < y[2] and 1 or -1)
            self.env.cached_lastevents = [all_events, datetime.now()]
Exemple #30
0
    def process_request(self, req):
        """Render one of the /stats pages (summary, code, wiki or tickets).

        Reads ``author``, ``path`` and ``last`` from the request arguments,
        builds an SQL WHERE fragment restricting by author and by age, and
        delegates rendering to the matching ``_process*`` helper.

        :param req: the Trac request object
        :returns: ``(template_name, data, content_type)`` tuple for Genshi
        :raises ValueError: if ``path`` is not a known sub-page or the
            configured database backend is unsupported
        """
        req.perm.require("STATS_VIEW")

        author = req.args.get("author", "")
        path = req.args.get("path", "")
        last = req.args.get("last", "")

        where = []
        if author:
            # NOTE(review): the WHERE fragment is passed around as plain
            # text by the _process* helpers, so the value cannot be bound
            # as a query parameter here.  Double any single quotes to block
            # SQL injection through the user-supplied 'author' argument.
            where.append("author = '%s'" % author.replace("'", "''"))
        since = 0
        if last:
            # 'last' is "<count><unit>": months (m, counted as 30 days),
            # weeks (w) or days (d).  Anything else is silently ignored,
            # matching the original per-unit regexes.
            m = re.match(r"(\d+)([mwd])", last)
            if m is not None:
                count, unit = m.groups()
                unit_seconds = {
                    "m": 30 * 24 * 60 * 60,
                    "w": 7 * 24 * 60 * 60,
                    "d": 24 * 60 * 60,
                }[unit]
                since = time.time() - int(count) * unit_seconds
                where.append("%s > %s" % (SECONDS, since))
        if where:
            where = "where " + " and ".join(where)
        else:
            where = ""

        data = {}
        data["author"] = author
        # Canned links for the period selector.
        data["last_1m"] = req.href.stats(path, last="1m", author=author)
        data["last_12m"] = req.href.stats(path, last="12m", author=author)
        data["last_all"] = req.href.stats(path, author=author)

        db = self.env.get_db_cnx()
        cursor = db.cursor()

        # Remember the backend flavour so later SQL can be dialect-specific.
        db_str = self.env.config.get("trac", "database")
        db_type, db_path = db_str.split(":", 1)
        if db_type not in ("sqlite", "mysql", "postgres"):
            # Raise instead of assert: asserts are stripped under -O.
            raise ValueError('Unsupported database "%s"' % db_type)
        self.db_type = db_type

        # Include trac wiki stylesheet
        add_stylesheet(req, "common/css/wiki.css")

        # Include trac stats stylesheet
        add_stylesheet(req, "stats/common.css")

        # Include javascript libraries
        add_script(req, "stats/jquery-1.4.3.min.js")
        add_script(req, "stats/jquery.flot.min.js")
        add_script(req, "stats/jquery.tablesorter.min.js")
        add_script(req, "stats/jquery.sparkline.min.js")
        add_script(req, "stats/excanvas.compiled.js")

        # Include context navigation links
        add_ctxtnav(req, "Summary", req.href.stats())
        add_ctxtnav(req, "Code", req.href.stats("code"))
        add_ctxtnav(req, "Wiki", req.href.stats("wiki"))
        add_ctxtnav(req, "Tickets", req.href.stats("tickets"))

        try:
            if path == "/":
                data["title"] = "Stats"
                result = self._process(req, cursor, where, data)
            elif path == "/code":
                data["title"] = "Code" + (author and (" (%s)" % author))
                result = self._process_code(req, cursor, where, data)
            elif path == "/wiki":
                data["title"] = "Wiki " + (author and (" (%s)" % author))
                result = self._process_wiki(req, cursor, where, since, data)
            elif path == "/tickets":
                data["title"] = "Tickets" + (author and (" (%s)" % author))
                result = self._process_tickets(req, cursor, where, since, data)
            else:
                raise ValueError("unknown path '%s'" % path)
        finally:
            # Close the cursor on every path, including helper failures
            # (the original leaked it when a _process* call raised).
            cursor.close()

        # Clean the unicode values for Genshi
        template_name, data, content_type = result
        new_data = {}
        for k, v in data.iteritems():
            if isinstance(v, str):
                new_data[k] = to_unicode(v)
            else:
                new_data[k] = v
        return template_name, new_data, content_type
Exemple #31
0
                content = tag.html(
                    tag.body(tag.strong("DB Error: " + unicode(e))))
            html = content.generate().render("xhtml")
            req.send_response(200)
            req.send_header('Cache-control', 'must-revalidate')
            req.send_header('Content-Type', 'text/html;charset=utf-8')
            req.send_header('Content-Length', len(html))
            req.end_headers()

            if req.method != 'HEAD':
                req.write(html)
            raise RequestDone

        try:
            hash = req.path_info[9:-3]
            mm = to_unicode(self._get_cache(hash)).encode('utf-8')
            req.send_response(200)
            req.send_header('Cache-control', 'must-revalidate')
            req.send_header('Content-Type', 'application/x-freemind')
            req.send_header('Content-Length', len(mm))
            req.end_headers()
            if req.method != 'HEAD':
                req.write(mm)
        except RequestDone:
            pass
        except Exception, e:
            self.log.error(e)
            req.send_response(500)
            try:
                req.end_headers()
                req.write(str(e))