Example #1
def index():
    # Tickets the current user created, with display names trimmed for the dashboard
    lastMade = TICKET_REPO.get_created_tickets(current_user.id)
    for ticket in lastMade:
        ticket.name = make_thumbnail(ticket.name, TICKET_THUMBNAIL_MAX_LENGTH)
        ticket.author_id.username = make_thumbnail(
            ticket.author_id.clientname, AUTHOR_THUMBNAIL_MAX_LENGTH)
        ticket.product_id.name = make_thumbnail(ticket.product_id.name,
                                                PRODUCT_THUMBNAIL_MAX_LENGTH)
        ticket.creation_date = format_date(ticket)

    # Tickets the current user has commented on, trimmed the same way
    lastCommented = TICKET_REPO.get_commented_tickets(current_user.id)
    for ticket in lastCommented:
        ticket.name = make_thumbnail(ticket.name, TICKET_THUMBNAIL_MAX_LENGTH)
        ticket.author_id.username = make_thumbnail(
            ticket.author_id.clientname, AUTHOR_THUMBNAIL_MAX_LENGTH)
        ticket.product_id.name = make_thumbnail(ticket.product_id.name,
                                                PRODUCT_THUMBNAIL_MAX_LENGTH)
        ticket.creation_date = format_date(ticket)
    # Staff roles (anything above CUSTOMER) also see their open tasks
    if current_user.position_id.id > CUSTOMER:
        assignedTasks = TASK_REPO.get_user_tasks_open(current_user.id)
        return render_template("dashboard.html",
                               user=current_user,
                               assignedTasks=assignedTasks,
                               madeTickets=lastMade,
                               commentedTickets=lastCommented,
                               search_image="/Static/search.png")

    return render_template("dashboard.html",
                           user=current_user,
                           madeTickets=lastMade,
                           commentedTickets=lastCommented,
                           search_image="/Static/search.png")
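
Several of these views rely on make_thumbnail and format_date helpers that the listing does not show. Below is a minimal sketch of what make_thumbnail presumably does (truncate a display string to a maximum length); the ellipsis suffix and exact cut-off rule are assumptions, not taken from the original project.

def make_thumbnail(text, max_length):
    # Assumed behaviour: shorten a display string to at most max_length
    # characters, marking the truncation with an ellipsis. The real helper
    # in the project may differ.
    if text is None or len(text) <= max_length:
        return text
    return text[:max_length].rstrip() + "..."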
Example #2
def check_csv(file_name, year=None, exclude=None):
    '''
    Check for missing editorials.
    year: check every day in the specified year
    exclude: list of weekday numbers to skip (eg. [6] to exclude Sunday)
    '''
    # Avoid a mutable default argument; open the CSV in text mode for Python 3
    exclude = exclude or []
    articles = csv.reader(open(file_name, 'r', newline=''), delimiter=',', quotechar='"')
    article_dates = []
    missing_dates = []
    for article in articles:
        # Get the date
        date = parse_date(article[6])
        article_dates.append(date)
    article_dates.sort()
    duplicates = find_duplicates(article_dates)
    print('Duplicates: %s' % len(duplicates))
    if year:
        start_date = datetime.date(year, 1, 1)
        end_date = datetime.date(year, 12, 31)
    else:
        start_date = article_dates[0]
        end_date = article_dates[-1]
    one_day = datetime.timedelta(days=1)
    this_day = start_date
    # Loop through each day in specified period to see if there's an article
    # If not, add to the missing_dates list.
    while this_day <= end_date:
        if this_day.weekday() not in exclude:  # skip excluded weekdays (e.g. Sunday)
            if this_day not in article_dates:
                missing_dates.append(this_day)
        this_day += one_day
    print('Missing: %s' % len(missing_dates))
    csv_out = csv.DictWriter(open(file_name, 'a', newline=''),
                             extrasaction='ignore',
                             fieldnames=[
                                 'id', 'title', 'url', 'newspaper_title',
                                 'newspaper_details', 'newspaper_id',
                                 'issue_date', 'page', 'page_url',
                                 'corrections', 'ftext'
                             ],
                             dialect=csv.excel)
    # Write a results file with nicely formatted dates
    with open(os.path.join(os.path.dirname(file_name), 'csv_check.html'),
              'w') as results:
        results.write(
            '<html>\n<head>\n  <title>Results</title>\n</head>\n<body>')
        results.write('<h2>Duplicates:</h2>\n<table>\n')
        for dup in duplicates:
            results.write(
                '<tr><td>%s</td><td><a href="%s">View issue</a></td></tr>\n' %
                (format_date(dup), get_issue_url(dup, '35')))
        results.write('</table>\n')
        results.write('<h2>Missing:</h2>\n<table>\n')
        for missing in missing_dates:
            results.write(
                '<tr><td>%s</td><td><a href="%s">View issue</a></td></tr>\n' %
                (format_date(missing), get_issue_url(missing, '35')))
            csv_out.writerow({'issue_date': format_date(missing)})
        results.write('</table>\n')
        results.write('</body>\n</html>')
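
check_csv assumes module-level imports of csv, datetime and os, plus helpers (parse_date, find_duplicates, format_date, get_issue_url) that are not shown in this listing. A minimal call might look like the following; the file name and year are purely illustrative.

# Check every day of one year, skipping Sundays (weekday() == 6).
# Writes csv_check.html next to the input file and appends placeholder
# rows for the missing dates to the CSV itself.
check_csv('editorials.csv', year=1913, exclude=[6])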
Example #3
def base_search():
    query = request.args.get("query")
    tickets = TICKET_REPO.search_ticket(query)

    # The repository may return a literal True instead of a result list;
    # handle that case before trying to iterate over it
    if tickets is True:
        return redirect(f"/tickets/{query}")

    for ticket in tickets:
        format_date(ticket)
        ticket.name = make_thumbnail(ticket.name, THUMBNAIL_LENGTH)

    return render_template("search.html", user=current_user, tickets=tickets)
Example #4
def product_ticket(productId, ticketId):
    ticketForm = CreateTicketForm()
    commentForm = CreateCommentForm()

    # Product exists
    if PRODUCT_REPO.check_product(productId):
        # Get the ticket
        ticket = TICKET_REPO.get_ticket(ticketId)
        # Ticket exists
        if ticket:
            # Get ticket comments
            # If ticket form was sent, update the ticket
            if ticketForm.validate_on_submit():
                ticket.name = ticketForm.title.data
                ticket.description = ticketForm.description.data
                try:
                    ticket.save()
                except PeeweeException:
                    flash("Cannot save! Check length of elements!", "ticket")

            # If comment form was sent, create new comment
            elif commentForm.validate_on_submit():
                if ticket.closed:
                    return abort(HTTP_BAD_REQUEST)
                try:
                    imageName = handle_image(commentForm.image)
                except InvalidFile:
                    # flash() returns None, so it must not be returned as the
                    # response; show the message and reload the ticket page
                    flash("Invalid image uploaded!", "ticket")
                    return redirect(request.url)

                comment = commentForm.content.data
                try:
                    COMMENT_REPO.create_ticket_comment(comment, imageName,
                                                       ticketId,
                                                       current_user.id)
                except PeeweeException:
                    flash("Cannot save! Check length of elements!", "ticket")
            # Else just return prefilled forms to enable editing

            comments = COMMENT_REPO.get_ticket_comments(ticketId)
            format_date(ticket)
            for comment in comments:
                format_date(comment)
            ticketForm.description.data = ticket.description
            ticketForm.title.data = ticket.name

            return render_template("ticket.html",
                                   ticketForm=ticketForm,
                                   user=current_user,
                                   ticket=ticket,
                                   comments=comments,
                                   commentForm=commentForm)
    return abort(HTTP_NOT_FOUND)
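
The validate_on_submit calls suggest Flask-WTF style forms. Below is a minimal sketch of what CreateCommentForm might look like; only the field names (content, image) follow from how the view uses the form, everything else is an assumption.

from flask_wtf import FlaskForm
from flask_wtf.file import FileField
from wtforms import TextAreaField
from wtforms.validators import DataRequired

class CreateCommentForm(FlaskForm):
    # Field names match the attributes the view reads (content.data, image);
    # labels and validators are illustrative assumptions.
    content = TextAreaField("Comment", validators=[DataRequired()])
    image = FileField("Image")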
Example #5
def product_tickets(productId):
    product = PRODUCT_REPO.get_product(productId)
    if product:
        tickets = TICKET_REPO.get_product_tickets(productId)
        for ticket in tickets:
            ticket.name = make_thumbnail(ticket.name,
                                         TICKET_THUMBNAIL_MAX_LENGTH)
            ticket.author_id.username = make_thumbnail(
                ticket.author_id.clientname, AUTHOR_THUMBNAIL_MAX_LENGTH)
            format_date(ticket)
        return render_template("tickets.html",
                               tickets=tickets,
                               user=current_user,
                               product=product)
    return abort(HTTP_NOT_FOUND)
Example #6
def date(value):
    if value:
        return utilities.format_date(value)
    return "—"