Example #1
 def log_lines(self, start_page, end_page):
     "Returns the log lines and the previous/next timestamps, with images mixed in."
     if end_page > (start_page + 5):
         end_page = start_page + 5
     # Collect the log lines
     log_lines = []
     done_closest = False
     for page in range(start_page, end_page + 1):
         log_lines += list(self.main_transcript_query().page(page))
     for log_line in log_lines:
         log_line.images = list(log_line.images())
         log_line.lines = [(s, linkify(t, self.request)) for s, t in log_line.lines]
         # If this is the first after the start time, add an anchor later
         if log_line.timestamp > timestamp_to_seconds(self.kwargs.get("start", "00:00:00:00")) and not done_closest:
             log_line.closest = True
             done_closest = True
     # Find all media that falls inside this same range, and add it onto the preceding line.
     for image_line in self.media_transcript_query().range(log_lines[0].timestamp, log_lines[-1].timestamp):
         # Find the line just before the images
         last_line = None
         for log_line in log_lines:
             if log_line.timestamp > image_line.timestamp:
                 break
             last_line = log_line
         # Add the images to it
         last_line.images += image_line.images()
     # Find the previous log line from this, and then the beginning of its page
     try:
         previous_timestamp = self.main_transcript_query().page(start_page - 1).first().timestamp
     except ValueError:
         previous_timestamp = None
     # Find the next log line and its timestamp
     next_timestamp = log_lines[-1].next_timestamp()
     # Return
     return log_lines, previous_timestamp, next_timestamp, 0, None
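For reference, the tuple above has five slots; the last two are hard-coded to 0 and None in this variant. A minimal stand-in, only to show the shape a caller unpacks (the names follow the get_context_data examples below; this is not the real implementation):

def log_lines_stub(start_page, end_page):
    # (lines, previous timestamp, next timestamp, max highlight index, first highlighted line)
    return [], None, None, 0, None

log_lines, previous_timestamp, next_timestamp, max_highlight_index, first_highlighted_line = log_lines_stub(1, 1)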
Example #2
def selection_url(start_seconds, end_seconds=None):
    if end_seconds is None:
        url = reverse("view_range", kwargs={"start": mission_time(start_seconds)})
    else:
        url = reverse("view_range", kwargs={"start": mission_time(start_seconds), "end": mission_time(end_seconds)})
    if isinstance(start_seconds, basestring):
        start_seconds = timestamp_to_seconds(start_seconds)
    return '%s#log-line-%i' % ( url, start_seconds )
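A self-contained sketch of the URL and fragment shape this returns, using stand-ins for the Django pieces; the real mission_time() and reverse() are not reproduced here, and the "/001:02:03:04/" path is assumed purely for illustration:

def mission_time(stamp):
    return stamp  # stand-in; the real helper renders a mission timestamp for the URL

def reverse(name, kwargs):
    return '/%s/' % kwargs['start']  # stand-in for Django's URL reversal

url = reverse("view_range", kwargs={"start": mission_time("001:02:03:04")})
assert url == '/001:02:03:04/'
# selection_url() then appends '#log-line-<seconds>', e.g. '#log-line-93784'
# if the timestamp is days:hours:minutes:seconds.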
Example #3
def selection_url(start_seconds, end_seconds=None):
    if end_seconds is None:
        url = reverse("view_range", kwargs={"start": mission_time(start_seconds)})
    else:
        url = reverse("view_range", kwargs={"start": mission_time(start_seconds), "end": mission_time(end_seconds)})
    if isinstance(start_seconds, basestring):
        start_seconds = timestamp_to_seconds(start_seconds)
    return '%s#log-line-%i' % ( url, start_seconds )
Example #4
File: views.py Project: ariel/Spacelog
 def get_quote(self):
     quote_timestamp = self.request.redis_conn.srandmember(
         "mission:%s:homepage_quotes" % self.request.mission.name,
     )
     if quote_timestamp:
         return LogLine(
             self.request.redis_conn,
             self.request.mission.main_transcript,
             int(timestamp_to_seconds(quote_timestamp)),
         )
Example #5
File: views.py Project: groodt/Spacelog
    def get_context_data(self, start=None, end=None):

        if end is None:
            end = start

        # Get the content
        log_lines, previous_timestamp, next_timestamp, max_highlight_index, first_highlighted_line = self.log_lines(
            self.page_number(start),
            self.page_number(end),
        )
        
        act          = log_lines[0].act()
        act_id       = log_lines[0].act().number
        acts         = list(self.act_query().items())
        previous_act = None
        next_act     = None
        
        if act_id > 0:
            previous_act = acts[act_id-1]
        if act_id < len(acts) - 1:
            next_act = acts[act_id+1]
        
        for log_line in log_lines:
            if log_line.transcript_page:
                original_transcript_page = log_line.transcript_page
                break
        else:
            original_transcript_page = None
        
        if start:
            permalink_fragment = '#log-line-%s' % timestamp_to_seconds(start)
        else:
            permalink_fragment = '#log-line-%s' % log_lines[0].timestamp
        
        return {
            'start' : start,
            'log_lines': log_lines,
            'next_timestamp': next_timestamp,
            'previous_timestamp': previous_timestamp,
            'acts': acts,
            'act': act_id+1,
            'current_act': act,
            'previous_act': previous_act,
            'next_act': next_act,
            'max_highlight_index': max_highlight_index,
            'first_highlighted_line': first_highlighted_line,
            'original_transcript_page': original_transcript_page,
            'permalink': 'http://%s%s%s' % (
                self.request.META['HTTP_HOST'],
                self.request.path,
                permalink_fragment,
            )
        }
Example #6
File: views.py Project: Spacelog/Spacelog
 def render_to_response(self, context):
     # Ensure that the request is always redirected to:
     # - The first page (timestampless)
     # - The timestamp for the start of an act
     # - The timestamp for the start of an in-act page
     # If the timestamp is already one of these, render as normal
     
     requested_start       = None
     if context['start']:
         requested_start   = timestamp_to_seconds( context['start'] )
     current_act           = context['current_act']
     first_log_line        = context['log_lines'][0]
     prior_log_line        = first_log_line.previous()
     
     # NOTE: is_act_first_page will be false for first act:
     #       that's handled by is_first_page
     is_first_page         = not prior_log_line
     is_act_first_page     = False
     if prior_log_line:
         is_act_first_page = prior_log_line.timestamp < current_act.start \
                          <= first_log_line.timestamp
     
     page_start_url = None
     # If we're on the first page, but have a timestamp,
     # redirect to the bare page URL
     if requested_start and is_first_page:
         if context['transcript_name'] != context['mission_main_transcript']:
             # Split transcript name from [mission]/[transcript]
             transcript = context['transcript_name'].split('/')[1]
             page_start_url = reverse("view_page", kwargs={"transcript": transcript})
         else:
             page_start_url = reverse("view_page")
     # If we're on the first page of an act,
     # but not on the act-start timestamp, redirect to that
     elif is_act_first_page \
     and requested_start != current_act.start:
         page_start_url = timestamp_to_url( context, current_act.start )
     # If we're on any other page and the timestamp doesn't match
     # the timestamp of the first item, redirect to that item's timestamp
     elif requested_start and not is_act_first_page \
     and requested_start != first_log_line.timestamp:
         page_start_url = timestamp_to_url(
             context,
             first_log_line.timestamp
         )
     
     # Redirect to the URL we found
     if page_start_url:
         if self.request.GET:
             page_start_url += '?%s' % self.request.GET.urlencode()
         return HttpResponseRedirect( page_start_url )
     
     return super( PageView, self ).render_to_response( context )
Example #7
File: views.py Project: benburry/Spacelog
    def render_to_response(self, context):
        # Ensure that the request is always redirected to:
        # - The first page (timestampless)
        # - The timestamp for the start of an act
        # - The timestamp for the start of an in-act page
        # If the timestamp is already one of these, render as normal

        requested_start = None
        if context['start']:
            requested_start = timestamp_to_seconds(context['start'])
        current_act = context['current_act']
        first_log_line = context['log_lines'][0]
        prior_log_line = first_log_line.previous()

        # NOTE: is_act_first_page will be false for first act:
        #       that's handled by is_first_page
        is_first_page = not prior_log_line
        is_act_first_page = False
        if prior_log_line:
            is_act_first_page = prior_log_line.timestamp < current_act.start \
                             <= first_log_line.timestamp

        page_start_url = None
        # If we're on the first page, but have a timestamp,
        # redirect to the bare page URL
        if requested_start and is_first_page:
            if context['transcript_name'] != context['mission_main_transcript']:
                # Split transcript name from [mission]/[transcript]
                transcript = context['transcript_name'].split('/')[1]
                page_start_url = reverse("view_page",
                                         kwargs={"transcript": transcript})
            else:
                page_start_url = reverse("view_page")
        # If we're on the first page of an act,
        # but not on the act-start timestamp, redirect to that
        elif is_act_first_page \
        and requested_start != current_act.start:
            page_start_url = timestamp_to_url(context, current_act.start)
        # If we're on any other page and the timestamp doesn't match
        # the timestamp of the first item, redirect to that item's timestamp
        elif requested_start and not is_act_first_page \
        and requested_start != first_log_line.timestamp:
            page_start_url = timestamp_to_url(context,
                                              first_log_line.timestamp)

        # Redirect to the URL we found
        if page_start_url:
            if self.request.GET:
                page_start_url += '?%s' % self.request.GET.urlencode()
            return HttpResponseRedirect(page_start_url)

        return super(PageView, self).render_to_response(context)
Example #8
File: views.py Project: Spacelog/Spacelog
def stream(request, start):
    bitrate = 48000
    offset = 555
    file_path = os.path.join(settings.SITE_ROOT, '../missions/mr3/audio/ATG.mp3')
    start = timestamp_to_seconds(start)
    offset = int((start + offset) * bitrate / 8)
    file_size = os.path.getsize(file_path)
    if offset > file_size or offset < 0:
        raise Http404
    fh = open(file_path, 'r')
    fh.seek(offset)
    response = HttpResponse(ProgressiveFileWrapper(fh, int(bitrate / 8), 1))
    response['Content-Type'] = 'audio/mpeg'
    return response
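A quick check of the seek arithmetic above, assuming bitrate is bits per second (so bitrate / 8 is bytes per second) and the hard-coded 555 is a fixed number of seconds added to the requested start:

bitrate = 48000                          # bits per second
start = 120                              # two minutes in, for illustration
offset = int((start + 555) * bitrate / 8)
assert offset == 4050000                 # (120 + 555) * 6000 bytes into the MP3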
Example #9
File: views.py Project: niksbiks/Spacelog
def stream(request, start):
    bitrate = 48000
    offset = 555
    file_path = os.path.join(settings.SITE_ROOT,
                             '../missions/mr3/audio/ATG.mp3')
    start = timestamp_to_seconds(start)
    offset = int((start + offset) * bitrate / 8)
    file_size = os.path.getsize(file_path)
    if offset > file_size or offset < 0:
        raise Http404
    fh = open(file_path, 'r')
    fh.seek(offset)
    response = HttpResponse(ProgressiveFileWrapper(fh, int(bitrate / 8), 1))
    response['Content-Type'] = 'audio/mpeg'
    return response
Example #10
    def get_context_data(self, start=None, end=None):

        if end is None:
            end = start

        # Get the content
        log_lines, previous_timestamp, next_timestamp, max_highlight_index, first_highlighted_line = self.log_lines(
            self.page_number(start), self.page_number(end)
        )

        act = log_lines[0].act()
        act_id = log_lines[0].act().number
        acts = list(self.act_query().items())
        previous_act = None
        next_act = None

        if act_id > 0:
            previous_act = acts[act_id - 1]
        if act_id < len(acts) - 1:
            next_act = acts[act_id + 1]

        for log_line in log_lines:
            if log_line.transcript_page:
                original_transcript_page = log_line.transcript_page
                break
        else:
            original_transcript_page = None

        if start:
            permalink_fragment = "#log-line-%s" % timestamp_to_seconds(start)
        else:
            permalink_fragment = "#log-line-%s" % log_lines[0].timestamp

        return {
            "start": start,
            "log_lines": log_lines,
            "next_timestamp": next_timestamp,
            "previous_timestamp": previous_timestamp,
            "acts": acts,
            "act": act_id + 1,
            "current_act": act,
            "previous_act": previous_act,
            "next_act": next_act,
            "max_highlight_index": max_highlight_index,
            "first_highlighted_line": first_highlighted_line,
            "original_transcript_page": original_transcript_page,
            "permalink": "http://%s%s%s" % (self.request.META["HTTP_HOST"], self.request.path, permalink_fragment),
        }
Example #11
 def get_quote(self):
     quote_timestamp = self.request.redis_conn.srandmember(
         "mission:%s:homepage_quotes" % self.request.mission.name,
     )
     if quote_timestamp:
         if '/' in quote_timestamp:
             transcript, timestamp = quote_timestamp.rsplit('/', 1)
             transcript = "%s/%s" % (self.request.mission.name, transcript)
         else:
             transcript = self.request.mission.main_transcript
             timestamp = quote_timestamp
         return LogLine(
             self.request.redis_conn,
             transcript,
             int(timestamp_to_seconds(timestamp)),
         )
Example #12
def selection_url_in_transcript(context, start_seconds, transcript, end_seconds=None):
    url_args = {
        "start": mission_time(start_seconds)
    }
    if end_seconds:
        url_args["end"] = mission_time(end_seconds)
    
    if transcript and transcript != context['request'].mission.main_transcript:
        # Split transcript name from [mission]/[transcript]
        url_args["transcript"] = transcript.split('/')[1]
    
    # Render the URL
    url = reverse("view_range", kwargs=url_args)
    if isinstance(start_seconds, basestring):
        start_seconds = timestamp_to_seconds(start_seconds)
    return '%s#log-line-%i' % ( url, start_seconds )
Example #13
def selection_url_in_transcript(context,
                                start_seconds,
                                transcript,
                                end_seconds=None):
    url_args = {"start": mission_time(start_seconds)}
    if end_seconds:
        url_args["end"] = mission_time(end_seconds)

    if transcript and transcript != context['request'].mission.main_transcript:
        # Split transcript name from [mission]/[transcript]
        url_args["transcript"] = transcript.split('/')[1]

    # Render the URL
    url = reverse("view_range", kwargs=url_args)
    if isinstance(start_seconds, basestring):
        start_seconds = timestamp_to_seconds(start_seconds)
    return '%s#log-line-%i' % (url, start_seconds)
Example #14
    def render_to_response(self, context):
        # Ensure that the request is always redirected to:
        # - The first page (timestampless)
        # - The timestamp for the start of an act
        # - The timestamp for the start of an in-act page
        # If the timestamp is already one of these, render as normal

        requested_start = None
        if context["start"]:
            requested_start = timestamp_to_seconds(context["start"])
        current_act = context["current_act"]
        first_log_line = context["log_lines"][0]
        prior_log_line = first_log_line.previous()

        # NOTE: is_act_first_page will be false for first act:
        #       that's handled by is_first_page
        is_first_page = not prior_log_line
        is_act_first_page = False
        if prior_log_line:
            is_act_first_page = prior_log_line.timestamp < current_act.start <= first_log_line.timestamp

        page_start_url = None
        # If we're on the first page, but have a timestamp,
        # redirect to the bare page URL
        if requested_start and is_first_page:
            page_start_url = reverse("view_page")
        # If we're on the first page of an act,
        # but not on the act-start timestamp, redirect to that
        elif is_act_first_page and requested_start != current_act.start:
            page_start_url = timestamp_to_url(current_act.start)
        # If we're on any other page and the timestamp doesn't match
        # the timestamp of the first item, redirect to that item's timestamp
        elif requested_start and not is_act_first_page and requested_start != first_log_line.timestamp:
            page_start_url = timestamp_to_url(first_log_line.timestamp)

        # Redirect to the URL we found
        if page_start_url:
            if self.request.GET:
                page_start_url += "?%s" % self.request.GET.urlencode()
            return HttpResponseRedirect(page_start_url)

        return super(PageView, self).render_to_response(context)
Example #15
File: views.py Project: ariel/Spacelog
 def log_lines(self, start_page, end_page):
     "Returns the log lines and the previous/next timestamps, with images mixed in."
     if end_page > (start_page + 5):
         end_page = start_page + 5
     # Collect the log lines
     log_lines = []
     done_closest = False
     for page in range(start_page, end_page + 1):
         log_lines += list(self.main_transcript_query().page(page))
     for log_line in log_lines:
         log_line.images = list(log_line.images())
         log_line.lines = [(s, linkify(t, self.request))
                           for s, t in log_line.lines]
         # If this is the first after the start time, add an anchor later
         if log_line.timestamp > timestamp_to_seconds(
                 self.kwargs.get('start',
                                 "00:00:00:00")) and not done_closest:
             log_line.closest = True
             done_closest = True
     # Find all media that falls inside this same range, and add it onto the preceding line.
     for image_line in self.media_transcript_query().range(
             log_lines[0].timestamp, log_lines[-1].timestamp):
         # Find the line just before the images
         last_line = None
         for log_line in log_lines:
             if log_line.timestamp > image_line.timestamp:
                 break
             last_line = log_line
         # Add the images to it
         last_line.images += image_line.images()
     # Find the previous log line from this, and then the beginning of its page
     try:
         previous_timestamp = self.main_transcript_query().page(
             start_page - 1).first().timestamp
     except ValueError:
         previous_timestamp = None
     # Find the next log line and its timestamp
     next_timestamp = log_lines[-1].next_timestamp()
     # Return
     return log_lines, previous_timestamp, next_timestamp, 0, None
Example #16
File: views.py Project: niksbiks/Spacelog
    def get_context_data(self, start=None, end=None, transcript=None):

        if end is None:
            end = start

        # Get the content
        log_lines, previous_timestamp, next_timestamp, max_highlight_index, first_highlighted_line = self.log_lines(
            self.page_number(start),
            self.page_number(end),
        )

        act = log_lines[0].act()
        act_id = log_lines[0].act().number
        acts = list(self.act_query().items())
        previous_act = None
        next_act = None

        if act_id > 0:
            previous_act = acts[act_id - 1]
        if act_id < len(acts) - 1:
            next_act = acts[act_id + 1]

        for log_line in log_lines:
            if log_line.transcript_page:
                original_transcript_page = log_line.transcript_page
                break
        else:
            original_transcript_page = None

        if start:
            permalink_fragment = '#log-line-%s' % timestamp_to_seconds(start)
        else:
            permalink_fragment = '#log-line-%s' % log_lines[0].timestamp

        return {
            # HACK: Force request into context. Not sure why it's not here.
            'request': self.request,
            'mission_name': self.request.mission.name,
            'mission_main_transcript': self.request.mission.main_transcript,
            'transcript_name': self.get_transcript_name(),
            'transcript_short_name': self.get_transcript_name().split('/')[1],
            'start': start,
            'log_lines': log_lines,
            'next_timestamp': next_timestamp,
            'previous_timestamp': previous_timestamp,
            'acts': acts,
            'act': act_id + 1,
            'current_act': act,
            'previous_act': previous_act,
            'next_act': next_act,
            'max_highlight_index': max_highlight_index,
            'first_highlighted_line': first_highlighted_line,
            'original_transcript_page': original_transcript_page,
            'permalink': 'http://%s%s%s' % (
                self.request.META['HTTP_HOST'],
                self.request.path,
                permalink_fragment,
            ),
            'other_transcripts': self.other_transcripts(
                log_lines[0].timestamp,
                log_lines[-1].timestamp,
            ),
        }
Example #17
File: parser.py Project: ariel/Spacelog
 def get_chunks(self, offset=0):
     """
     Reads the log lines from the file in order and yields them.
     """
     current_chunk = None
     reuse_line = None
     lines = iter(self.get_lines(offset))
     while lines or reuse_line:
         # If there's a line to reuse, use that, else read a new
         # line from the file.
         if reuse_line:
             line = reuse_line
             reuse_line = None
         else:
             try:
                 line = lines.next()
             except StopIteration:
                 break
             offset += len(line)
             line = line.decode("utf8")
         # If it's a comment or empty line, ignore it.
         if not line.strip() or line.strip()[0] == "#":
             continue
         # If it's a timestamp header, make a new chunk object.
         elif line[0] == "[":
             # Read the timestamp
             try:
                 timestamp = int(line[1:].split("]")[0])
             except ValueError:
                 timestamp = timestamp_to_seconds(line[1:].split("]")[0])
             if current_chunk:
                 yield current_chunk
             # Start a new log line item
             current_chunk = {
                 "timestamp": timestamp,
                 "lines": [],
                 "meta": {},
                 "offset": offset - len(line),
             }
         # If it's metadata, read the entire thing.
         elif line[0] == "_":
             # Meta item
             name, blob = line.split(":", 1)
             while True:
                 try:
                     line = lines.next()
                 except StopIteration:
                     break
                 offset += len(line)
                 line = line.decode("utf8")
                 if not line.strip() or line.strip()[0] == "#":
                     continue
                 if line[0] in string.whitespace:
                     blob += line
                 else:
                     reuse_line = line
                     break
             # Parse the blob
             blob = blob.strip()
             if blob:
                 try:
                     data = json.loads(blob)
                 except ValueError:
                     try:
                         data = json.loads('"%s"' % blob)
                     except ValueError:
                         print "Error: Invalid json at timestamp %s, key %s" % \
                                         (seconds_to_timestamp(timestamp), name)
                         continue
                 current_chunk['meta'][name.strip()] = data
         # If it's a continuation, append to the current line
         elif line[0] in string.whitespace:
             # Continuation line
             if not current_chunk:
                 print "Error: Continuation line before first timestamp header. Line: %s" % \
                                                                     (line)
             elif not current_chunk['lines']:
                 print "Error: Continuation line before first speaker name. Timestamp %s" % \
                                                                     (seconds_to_timestamp(timestamp))
             else:
                 current_chunk['lines'][-1]['text'] += " " + line.strip()
         # If it's a new line, start a new line. Shock.
         else:
             # New line of speech
             try:
                 speaker, text = line.split(":", 1)
             except ValueError:
                 print "Error: First speaker line not in Name: Text format: %s. Timestamp %s" % \
                                                                     (line, seconds_to_timestamp(timestamp))
             else:
                 line = {
                     "speaker": speaker.strip(),
                     "text": text.strip(),
                 }
                 current_chunk['lines'].append(line)
     # Finally, if there's one last chunk, yield it.
     if current_chunk:
         yield current_chunk
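For orientation, a hand-written snippet of the transcript format this parser accepts, inferred from the branches above (bracketed timestamp headers, "_name:" metadata blobs, "Speaker: text" lines, indented continuations, "#" comments); illustrative only, not taken from a real mission file:

# comments and blank lines are ignored
[120]
_note: {"source": "example"}
CDR: Roger, we copy.
    continuation appended to the previous line of speech
CC: Stand by.
[129]
CDR: Go ahead.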
Example #18
 def get_chunks(self, offset=0):
     """
     Reads the log lines from the file in order and yields them.
     """
     current_chunk = None
     reuse_line = None
     lines = iter(self.get_lines(offset))
     while lines or reuse_line:
         # If there's a line to reuse, use that, else read a new
         # line from the file.
         if reuse_line:
             line = reuse_line
             reuse_line = None
         else:
             try:
                 line = lines.next()
             except StopIteration:
                 break
             offset += len(line)
             line = line.decode("utf8")
         # If it's a comment or empty line, ignore it.
         if not line.strip() or line.strip()[0] == "#":
             continue
         # If it's a timestamp header, make a new chunk object.
         elif line[0] == "[":
             # Read the timestamp
             try:
                 timestamp = int(line[1:].split("]")[0])
             except ValueError:
                 try:
                     timestamp = timestamp_to_seconds(line[1:].split("]")[0])
                 except ValueError:
                     print "Error: invalid timestamp %s" % (line[1:], )
                     raise
             if current_chunk:
                 yield current_chunk
             # Start a new log line item
             current_chunk = {
                 "timestamp": timestamp,
                 "lines": [],
                 "meta": {},
                 "offset": offset - len(line),
             }
         # If it's metadata, read the entire thing.
         elif line[0] == "_":
             # Meta item
             name, blob = line.split(":", 1)
             while True:
                 try:
                     line = lines.next()
                 except StopIteration:
                     break
                 offset += len(line)
                 line = line.decode("utf8")
                 if not line.strip() or line.strip()[0] == "#":
                     continue
                 if line[0] in string.whitespace:
                     blob += line
                 else:
                     reuse_line = line
                     break
             # Parse the blob
             blob = blob.strip()
             if blob:
                 try:
                     data = json.loads(blob)
                 except ValueError:
                     try:
                         data = json.loads('"%s"' % blob)
                     except ValueError:
                         print "Error: Invalid json at timestamp %s, key %s" % \
                                         (seconds_to_timestamp(timestamp), name)
                         continue
                 current_chunk['meta'][name.strip()] = data
         # If it's a continuation, append to the current line
         elif line[0] in string.whitespace:
             # Continuation line
             if not current_chunk:
                 print "Error: Continuation line before first timestamp header. Line: %s" % \
                                                                     (line)
             elif not current_chunk['lines']:
                 print "Error: Continuation line before first speaker name."
             else:
                 current_chunk['lines'][-1]['text'] += " " + line.strip()
         # If it's a new line, start a new line. Shock.
         else:
             # New line of speech
             try:
                 speaker, text = line.split(":", 1)
             except ValueError:
                 print "Error: First speaker line not in Name: Text format: %s." % (line,)
             else:
                 line = {
                     "speaker": speaker.strip(),
                     "text": text.strip(),
                 }
                 current_chunk['lines'].append(line)
     # Finally, if there's one last chunk, yield it.
     if current_chunk:
         yield current_chunk
Example #19
File: views.py Project: Spacelog/Spacelog
    def get_context_data(self, start=None, end=None, transcript=None):

        if end is None:
            end = start

        # Get the content
        log_lines, previous_timestamp, next_timestamp, max_highlight_index, first_highlighted_line = self.log_lines(
            self.page_number(start),
            self.page_number(end),
        )
        
        act          = log_lines[0].act()
        act_id       = log_lines[0].act().number
        acts         = list(self.act_query().items())
        previous_act = None
        next_act     = None
        
        if act_id > 0:
            previous_act = acts[act_id-1]
        if act_id < len(acts) - 1:
            next_act = acts[act_id+1]
        
        for log_line in log_lines:
            if log_line.transcript_page:
                original_transcript_page = log_line.transcript_page
                break
        else:
            original_transcript_page = None
        
        if start:
            permalink_fragment = '#log-line-%s' % timestamp_to_seconds(start)
        else:
            permalink_fragment = '#log-line-%s' % log_lines[0].timestamp
        
        return {
            # HACK: Force request into context. Not sure why it's not here.
            'request': self.request,
            'mission_name': self.request.mission.name,
            'mission_main_transcript': self.request.mission.main_transcript,
            'transcript_name': self.get_transcript_name(),
            'transcript_short_name': self.get_transcript_name().split('/')[1],
            'start' : start,
            'log_lines': log_lines,
            'next_timestamp': next_timestamp,
            'previous_timestamp': previous_timestamp,
            'acts': acts,
            'act': act_id+1,
            'current_act': act,
            'previous_act': previous_act,
            'next_act': next_act,
            'max_highlight_index': max_highlight_index,
            'first_highlighted_line': first_highlighted_line,
            'original_transcript_page': original_transcript_page,
            'permalink': 'http://%s%s%s' % (
                self.request.META['HTTP_HOST'],
                self.request.path,
                permalink_fragment,
            ),
            'other_transcripts': self.other_transcripts(
                log_lines[0].timestamp,
                log_lines[-1].timestamp,
            ),
        }
Example #20
File: views.py Project: ariel/Spacelog
    def get_context_data(self):
        # Get the query text
        q = self.request.GET.get('q', '')
        # Get the offset value
        try:
            offset = int(self.request.GET.get('offset', '0'))
            if offset < 0:
                offset = 0
        except ValueError:
            offset = 0

        # Is it a special search?
        special_value = self.request.redis_conn.get("special_search:%s:%s" % (
            self.request.mission.name,
            q,
        ))
        if special_value:
            self.template_name = "search/special.html"
            return {
                "q": q,
                "text": special_value,
            }

        # Get the results from Xapian
        db = xappy.SearchConnection(
            os.path.join(settings.SITE_ROOT, '..', "xappydb"),
        )
        query = db.query_parse(
            q,
            default_op=db.OP_OR,
            deny=["mission"],
        )
        query = db.query_filter(
            query,
            db.query_composite(db.OP_AND, [
                db.query_field("mission", self.request.mission.name),
                db.query_field("transcript",
                               self.request.mission.main_transcript),
            ]))
        results = db.search(
            query=query,
            startrank=offset,
            endrank=offset + PAGESIZE,
            checkatleast=-1,  # everything (entire xapian db fits in memory, so this should be fine)
            sortby="-weight",
        )
        # Go through the results, building a list of LogLine objects
        redis_conn = self.request.redis_conn
        log_lines = []
        for result in results:
            transcript_name, timestamp = result.id.split(":", 1)
            log_line = LogLine(redis_conn, transcript_name, int(timestamp))
            log_line.speaker = Character(redis_conn,
                                         transcript_name.split('/')[0],
                                         result.data['speaker_identifier'][0])
            log_line.title = mark_safe(
                result.summarise("text",
                                 maxlen=50,
                                 ellipsis='&hellip;',
                                 strict_length=True,
                                 hl=None))
            log_line.summary = mark_safe(
                result.summarise("text",
                                 maxlen=600,
                                 ellipsis='&hellip;',
                                 hl=('<mark>', '</mark>')))
            log_lines.append(log_line)

        def page_url(offset):
            return reverse("search") + '?' + urllib.urlencode(
                {
                    'q': q.encode('utf-8'),
                    'offset': offset,
                })

        if offset == 0:
            previous_page = False
        else:
            previous_page = page_url(offset - PAGESIZE)

        if offset + PAGESIZE > results.matches_estimated:
            next_page = False
        else:
            next_page = page_url(offset + PAGESIZE)

        thispage = offset / PAGESIZE
        maxpage = results.matches_estimated / PAGESIZE

        pages_to_show = set([0]) | set([thispage - 1, thispage, thispage + 1]) | set([maxpage])
        if 0 == thispage:
            pages_to_show.remove(thispage - 1)
        if maxpage == thispage:
            pages_to_show.remove(thispage + 1)
        pages = []

        class Page(object):
            def __init__(self, number, url, selected=False):
                self.number = number
                self.url = url
                self.selected = selected

        pages_in_order = list(pages_to_show)
        pages_in_order.sort()
        for page in pages_in_order:
            if len(pages) > 0 and page != pages[-1].number:
                pages.append('...')
            pages.append(
                Page(page + 1, page_url(page * PAGESIZE), page == thispage))

        error_info = self.request.redis_conn.hgetall(
            "error_page:%s:%s" % (
                self.request.mission.name,
                'no_search_results',
            ), )
        if not error_info:
            error_info = {}
        if error_info.has_key('classic_moment_quote'):
            error_quote = LogLine(
                self.request.redis_conn, self.request.mission.main_transcript,
                timestamp_to_seconds(error_info['classic_moment_quote']))
        else:
            error_quote = None

        return {
            'log_lines': log_lines,
            'result': results,
            'q': q,
            'previous_page': previous_page,
            'next_page': next_page,
            'pages': pages,
            'debug': {
                'query': query,
            },
            'error': {
                'info': error_info,
                'quote': error_quote,
            }
        }
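To make the paging window concrete: assuming PAGESIZE is 20 (the constant is defined elsewhere in the project), an offset of 40 with about 200 estimated matches keeps the first page, the current page and its neighbours, and the last page:

PAGESIZE = 20                            # assumed value, for illustration only
offset = 40
matches_estimated = 200
thispage = offset // PAGESIZE            # 2
maxpage = matches_estimated // PAGESIZE  # 10
pages_to_show = set([0]) | set([thispage - 1, thispage, thispage + 1]) | set([maxpage])
assert sorted(pages_to_show) == [0, 1, 2, 3, 10]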
Example #21
    def get_context_data(self):
        # Get the query text
        q = self.request.GET.get("q", "")
        # Get the offset value
        try:
            offset = int(self.request.GET.get("offset", "0"))
            if offset < 0:
                offset = 0
        except ValueError:
            offset = 0

        # Is it a special search?
        special_value = self.request.redis_conn.get("special_search:%s:%s" % (self.request.mission.name, q))
        if special_value:
            self.template_name = "search/special.html"
            return {"q": q, "text": special_value}

        # Get the results from Xapian
        db = xappy.SearchConnection(os.path.join(settings.SITE_ROOT, "..", "xappydb"))
        query = db.query_parse(q, default_op=db.OP_OR, deny=["mission"])
        query = db.query_filter(
            query,
            db.query_composite(
                db.OP_AND,
                [
                    db.query_field("mission", self.request.mission.name),
                    db.query_field("transcript", "%s/TEC" % self.request.mission.name),
                ],
            ),
        )
        results = db.search(
            query=query,
            startrank=offset,
            endrank=offset + PAGESIZE,
            checkatleast=-1,  # everything (entire xapian db fits in memory, so this should be fine)
            sortby="-weight",
        )
        # Go through the results, building a list of LogLine objects
        redis_conn = self.request.redis_conn
        log_lines = []
        for result in results:
            transcript_name, timestamp = result.id.split(":", 1)
            log_line = LogLine(redis_conn, transcript_name, int(timestamp))
            log_line.speaker = Character(
                redis_conn, transcript_name.split("/")[0], result.data["speaker_identifier"][0]
            )
            log_line.title = mark_safe(
                result.summarise("text", maxlen=50, ellipsis="&hellip;", strict_length=True, hl=None)
            )
            log_line.summary = mark_safe(
                result.summarise("text", maxlen=600, ellipsis="&hellip;", hl=("<mark>", "</mark>"))
            )
            log_lines.append(log_line)

        def page_url(offset):
            return reverse("search") + "?" + urllib.urlencode({"q": q, "offset": offset})

        if offset == 0:
            previous_page = False
        else:
            previous_page = page_url(offset - PAGESIZE)

        if offset + PAGESIZE > results.matches_estimated:
            next_page = False
        else:
            next_page = page_url(offset + PAGESIZE)

        thispage = offset / PAGESIZE
        maxpage = results.matches_estimated / PAGESIZE

        pages_to_show = set([0]) | set([thispage - 1, thispage, thispage + 1]) | set([maxpage])
        if 0 == thispage:
            pages_to_show.remove(thispage - 1)
        if maxpage == thispage:
            pages_to_show.remove(thispage + 1)
        pages = []

        class Page(object):
            def __init__(self, number, url, selected=False):
                self.number = number
                self.url = url
                self.selected = selected

        pages_in_order = list(pages_to_show)
        pages_in_order.sort()
        for page in pages_in_order:
            if len(pages) > 0 and page != pages[-1].number:
                pages.append("...")
            pages.append(Page(page + 1, page_url(page * PAGESIZE), page == thispage))

        error_info = self.request.redis_conn.hgetall(
            "error_page:%s:%s" % (self.request.mission.name, "no_search_results")
        )
        if not error_info:
            error_info = {}
        if error_info.has_key("classic_moment_quote"):
            error_quote = LogLine(
                self.request.redis_conn,
                self.request.mission.main_transcript,
                timestamp_to_seconds(error_info["classic_moment_quote"]),
            )
        else:
            error_quote = None

        return {
            "log_lines": log_lines,
            "result": results,
            "q": q,
            "previous_page": previous_page,
            "next_page": next_page,
            "pages": pages,
            "debug": {"query": query},
            "error": {"info": error_info, "quote": error_quote},
        }
Example #22
File: views.py Project: groodt/Spacelog
 def parse_mission_time(self, mission_time):
     "Parses a mission timestamp from a URL and converts it to a number of seconds"
     # d, h, m, s = [ int(x) for x in mission_time.split(':') ]
     # print mission_time
     # return s + m*60 + h*3600 + d*86400
     return timestamp_to_seconds( mission_time )
Example #23
File: views.py Project: ariel/Spacelog
 def parse_mission_time(self, mission_time):
     "Parses a mission timestamp from a URL and converts it to a number of seconds"
     # d, h, m, s = [ int(x) for x in mission_time.split(':') ]
     # print mission_time
     # return s + m*60 + h*3600 + d*86400
     return timestamp_to_seconds(mission_time)
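The commented-out lines in the two examples above hint at what timestamp_to_seconds does. A minimal sketch under that assumption (a days:hours:minutes:seconds mission timestamp; the real Spacelog helper may accept other forms, such as negative pre-launch times):

def timestamp_to_seconds_sketch(mission_time):
    # assumed "DDD:HH:MM:SS" layout, mirroring the commented-out conversion above
    d, h, m, s = [int(x) for x in mission_time.split(':')]
    return s + m * 60 + h * 3600 + d * 86400

assert timestamp_to_seconds_sketch("001:02:03:04") == 93784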
Example #24
File: views.py Project: ariel/Spacelog
    def get_context_data(self):
        # Get the query text
        q = self.request.GET.get('q', '')
        # Get the offset value
        try:
            offset = int(
                self.request.GET.get('offset', '0')
            )
            if offset < 0:
                offset = 0
        except ValueError:
            offset = 0

        # Is it a special search?
        special_value = self.request.redis_conn.get("special_search:%s:%s" % (
            self.request.mission.name,
            q,
        ))
        if special_value:
            self.template_name = "search/special.html"
            return {
                "q": q,
                "text": special_value,
            }

        # Get the results from Xapian
        db = xappy.SearchConnection(
            os.path.join(
                settings.SITE_ROOT,
                '..',
                "xappydb",
            ),
        )
        query = db.query_parse(
            q,
            default_op=db.OP_OR,
            deny=["mission"],
        )
        query = db.query_filter(
            query,
            db.query_composite(db.OP_AND, [
                db.query_field("mission", self.request.mission.name),
                db.query_field("transcript", self.request.mission.main_transcript),
            ])
        )
        results = db.search(
            query=query,
            startrank=offset,
            endrank=offset+PAGESIZE,
            checkatleast=-1, # everything (entire xapian db fits in memory, so this should be fine)
            sortby="-weight",
        )
        # Go through the results, building a list of LogLine objects
        redis_conn = self.request.redis_conn
        log_lines = []
        for result in results:
            transcript_name, timestamp = result.id.split(":", 1)
            log_line = LogLine(redis_conn, transcript_name, int(timestamp))
            log_line.speaker = Character(redis_conn, transcript_name.split('/')[0], result.data['speaker_identifier'][0])
            log_line.title = mark_safe(result.summarise("text", maxlen=50, ellipsis='&hellip;', strict_length=True, hl=None))
            log_line.summary = mark_safe(result.summarise("text", maxlen=600, ellipsis='&hellip;', hl=('<mark>', '</mark>')))
            log_lines.append(log_line)

        def page_url(offset):
            return reverse("search") + '?' + urllib.urlencode({
                'q': q.encode('utf-8'),
                'offset': offset,
            })

        if offset==0:
            previous_page = False
        else:
            previous_page = page_url(offset - PAGESIZE)

        if offset+PAGESIZE > results.matches_estimated:
            next_page = False
        else:
            next_page = page_url(offset + PAGESIZE)

        thispage = offset / PAGESIZE
        maxpage = results.matches_estimated / PAGESIZE
        
        pages_to_show = set([0]) | set([thispage-1, thispage, thispage+1]) | set([maxpage])
        if 0 == thispage:
            pages_to_show.remove(thispage-1)
        if maxpage == thispage:
            pages_to_show.remove(thispage+1)
        pages = []
        
        class Page(object):
            def __init__(self, number, url, selected=False):
                self.number = number
                self.url = url
                self.selected = selected
        
        pages_in_order = list(pages_to_show)
        pages_in_order.sort()
        for page in pages_in_order:
            if len(pages)>0 and page != pages[-1].number:
                pages.append('...')
            pages.append(Page(page+1, page_url(page*PAGESIZE), page==thispage))
        
        error_info = self.request.redis_conn.hgetall(
            "error_page:%s:%s" % (
                self.request.mission.name,
                'no_search_results',
            ),
        )
        if not error_info:
            error_info = {}
        if error_info.has_key('classic_moment_quote'):
            error_quote = LogLine(
                self.request.redis_conn,
                self.request.mission.main_transcript,
                timestamp_to_seconds(error_info['classic_moment_quote'])
            )
        else:
            error_quote = None
        
        return {
            'log_lines': log_lines,
            'result': results,
            'q': q,
            'previous_page': previous_page,
            'next_page': next_page,
            'pages': pages,
            'debug': {
                'query': query,
            },
            'error': {
                'info': error_info,
                'quote': error_quote,
            }
        }
Example #25
File: views.py Project: ariel/Spacelog
    def get_context_data(self, start=None, end=None):

        if end is None:
            end = start

        # Get the content
        log_lines, previous_timestamp, next_timestamp, max_highlight_index, first_highlighted_line = self.log_lines(
            self.page_number(start),
            self.page_number(end),
        )

        act = log_lines[0].act()
        act_id = log_lines[0].act().number
        acts = list(self.act_query().items())
        previous_act = None
        next_act = None

        if act_id > 0:
            previous_act = acts[act_id - 1]
        if act_id < len(acts) - 1:
            next_act = acts[act_id + 1]

        for log_line in log_lines:
            if log_line.transcript_page:
                original_transcript_page = log_line.transcript_page
                break
        else:
            original_transcript_page = None

        if start:
            permalink_fragment = '#log-line-%s' % timestamp_to_seconds(start)
        else:
            permalink_fragment = '#log-line-%s' % log_lines[0].timestamp

        return {
            'start': start,
            'log_lines': log_lines,
            'next_timestamp': next_timestamp,
            'previous_timestamp': previous_timestamp,
            'acts': acts,
            'act': act_id + 1,
            'current_act': act,
            'previous_act': previous_act,
            'next_act': next_act,
            'max_highlight_index': max_highlight_index,
            'first_highlighted_line': first_highlighted_line,
            'original_transcript_page': original_transcript_page,
            'permalink': 'http://%s%s%s' % (
                self.request.META['HTTP_HOST'],
                self.request.path,
                permalink_fragment,
            )
        }