Example #1
0
 def test_encode_unicode(self):
     """as_encoded_str should encode unicode to utf-8 by default, or to
     an explicitly requested encoding such as utf-16 (note the BOM in
     the expected utf-16 byte string)."""
     self.assertEqual(utilities.as_encoded_str(u'Ivan Krsti\u0107'),
                      'Ivan Krsti\xc4\x87')
     self.assertEqual(
         utilities.as_encoded_str(u'Ivan Krsti\u0107', encoding='utf-16'),
         '\xff\xfeI\x00v\x00a\x00n\x00 \x00K\x00r\x00s\x00t\x00i\x00\x07\x01'
     )
def playlist_export_report_worker(results, request_params):
    """Background worker that pages through playlist events in a date
    range and accumulates them into ``results['items']`` for the export
    report.

    Args:
        results: accumulator dict from the previous batch, or None on
            the first call (the accumulator is initialized here).
        request_params: form data containing 'from_date' and 'to_date'.

    Returns:
        (finished, results) where finished is True once a batch comes
        back empty.

    Raises:
        ValueError: if the submitted form data is invalid.
    """
    form = PlaylistReportForm(data=request_params)
    if not form.is_valid():
        # TODO(Kumar) make this visible to the user
        raise ValueError('Invalid PlaylistReportForm')
    from_date = form.cleaned_data['from_date']
    to_date = form.cleaned_data['to_date']

    if results is None:
        # when starting the job, init the accumulator
        results = {
            'items': {},  # items keyed by datetime established
            'last_offset': 0,
            'from_date': str(from_date),
            'to_date': str(to_date),
        }

    # Each invocation handles the next batch of 50 entries.
    offset = results['last_offset']
    last_offset = offset + 50
    results['last_offset'] = last_offset

    query = filter_playlist_events_by_date_range(from_date, to_date)
    all_entries = query[offset:last_offset]

    # An empty batch means the whole query has been consumed.
    finished = len(all_entries) == 0

    for entry in all_entries:
        established = _get_entity_attr(entry, 'established_display')
        report_key = as_encoded_str(str(established))

        # isinstance() instead of an exact type comparison so that
        # subclasses of PlaylistBreak are also treated as breaks.
        if isinstance(entry, PlaylistBreak):
            results['items'][report_key] = {
                'established': as_encoded_str(
                    established.strftime('%Y-%m-%d %H:%M:%S')),
                'is_break': True
            }
            continue

        playlist = _get_entity_attr(entry, 'playlist')
        track = _get_entity_attr(entry, 'track')
        results['items'][report_key] = {
            'channel': as_encoded_str(_get_entity_attr(playlist, 'channel')),
            'date': as_encoded_str(established.strftime("%m/%d/%y")),
            'duration_ms': as_encoded_str(_get_entity_attr(track,
                                                           'duration_ms', 0)),
            'established': as_encoded_str(
                established.strftime('%Y-%m-%d %H:%M:%S')),
            'artist_name': as_encoded_str(_get_entity_attr(entry,
                                                           'artist_name')),
            'track_title': as_encoded_str(_get_entity_attr(entry,
                                                           'track_title')),
            'album_title': as_encoded_str(_get_entity_attr(entry,
                                                           'album_title_display')),
            'label': as_encoded_str(_get_entity_attr(entry, 'label_display')),
            'is_break': False
        }

    return finished, results
Example #3
0
def playlist_report_worker(results, request_params):
    """Tally play counts for tracks in a date range, one batch at a time.

    results is the accumulator dict from the previous batch (None on the
    first call).  Returns (finished, results); finished is True once a
    batch comes back empty.  Raises ValueError for invalid form data.
    """
    form = PlaylistReportForm(data=request_params)
    if not form.is_valid():
        # TODO(Kumar) make this visible to the user
        raise ValueError('Invalid PlaylistReportForm')
    from_date = form.cleaned_data['from_date']
    to_date = form.cleaned_data['to_date']

    if results is None:
        # First batch: set up the accumulator.
        results = {
            'items': {},  # items keyed by play key
            'last_offset': 0,
            'play_counts': {},  # play keys to number of plays
            'from_date': str(from_date),
            'to_date': str(to_date),
        }

    # Advance the paging window by 50 entries.
    start = results['last_offset']
    end = start + 50
    results['last_offset'] = end

    batch = filter_tracks_by_date_range(from_date, to_date)[start:end]
    finished = len(batch) == 0

    for entry in batch:
        play_key = play_count_key(entry)
        if play_key in results['play_counts']:
            # Seen before: just bump the tally.
            results['play_counts'][play_key] += 1
        else:
            # First sighting: record the item details once.
            results['play_counts'][play_key] = 1
            results['items'][play_key] = {
                'album_title':
                    as_encoded_str(_get_entity_attr(entry, 'album_title')),
                'artist_name':
                    as_encoded_str(_get_entity_attr(entry, 'artist_name')),
                'label':
                    as_encoded_str(_get_entity_attr(entry, 'label')),
                'heavy_rotation':
                    str(int(HEAVY_ROTATION_TAG in entry.categories)),
                'light_rotation':
                    str(int(LIGHT_ROTATION_TAG in entry.categories)),
            }

    return finished, results
def playlist_report_worker(results, request_params):
    """Count plays per track over a date range, processed in batches.

    Returns a (finished, results) pair; finished becomes True when an
    empty batch signals the end of the query.  Raises ValueError if the
    submitted form data does not validate.
    """
    form = PlaylistReportForm(data=request_params)
    if not form.is_valid():
        # TODO(Kumar) make this visible to the user
        raise ValueError('Invalid PlaylistReportForm')
    from_date = form.cleaned_data['from_date']
    to_date = form.cleaned_data['to_date']

    if results is None:
        # Initialize the accumulator on the first invocation.
        results = {
            'items': {},  # items keyed by play key
            'last_offset': 0,
            'play_counts': {},  # play keys to number of plays
            'from_date': str(from_date),
            'to_date': str(to_date),
        }

    # Page through the query 50 entries at a time.
    lo = results['last_offset']
    hi = lo + 50
    results['last_offset'] = hi

    entries = filter_tracks_by_date_range(from_date, to_date)[lo:hi]
    finished = len(entries) == 0

    play_counts = results['play_counts']
    for entry in entries:
        play_key = play_count_key(entry)
        if play_key in play_counts:
            play_counts[play_key] += 1
            continue
        # First time we see this track: start its tally and capture its
        # display fields.
        play_counts[play_key] = 1
        results['items'][play_key] = {
            'album_title': as_encoded_str(_get_entity_attr(entry,
                                                           'album_title')),
            'artist_name': as_encoded_str(_get_entity_attr(entry,
                                                           'artist_name')),
            'label': as_encoded_str(_get_entity_attr(entry, 'label')),
            'heavy_rotation': str(int(HEAVY_ROTATION_TAG in
                                      entry.categories)),
            'light_rotation': str(int(LIGHT_ROTATION_TAG in
                                      entry.categories)),
        }

    return finished, results
def playlist_report_export_product(results):
    """Render the accumulated export-report items as a tab-separated
    attachment.

    Expects ``results`` as produced by playlist_export_report_worker
    (items keyed by established datetime, plus from_date/to_date).
    Returns an HttpResponse carrying the tab-delimited file.
    """
    fname = "chirp-export-report_%s_%s" % (results['from_date'],
                                           results['to_date'])
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = "attachment; filename=%s.txt" % (fname)
    # csv.writer()'s second positional argument is the *dialect*; passing
    # EXPORT_REPORT_FIELDS there was a silent no-op in Python 2 (and a
    # TypeError in Python 3), so it has been dropped.
    writer = csv.writer(response, delimiter='\t')
    writer.writerow(EXPORT_REPORT_FIELDS)
    # sort on date and time, newest first
    sorted_keys = sorted(results['items'], reverse=True)
    # construct start and end times
    prev_established = None
    for key in sorted_keys:
        item = results['items'][key]
        established = datetime.strptime(item['established'],
                                        '%Y-%m-%d %H:%M:%S')
        if item['is_break']:
            prev_established = established
            continue
        item['start_time'] = established.strftime('%H:%M:%S')
        # calculate end times; keys are descending, so prev_established
        # is the event that followed this one
        if prev_established and prev_established.date() == established.date():
            item['end_time'] = (prev_established -
                                timedelta(seconds=1)).strftime('%H:%M:%S')
        else:
            # track is last played for the day
            if item['duration_ms']:
                # NOTE(review): the worker stores duration_ms through
                # as_encoded_str; timedelta needs a number here — confirm
                # as_encoded_str passes ints through unchanged.
                delta = timedelta(milliseconds=item['duration_ms'])
                item['end_time'] = (established + delta).strftime('%H:%M:%S')
            else:
                # no track duration, default to 4 minutes
                item['end_time'] = (established +
                                    timedelta(minutes=4)).strftime('%H:%M:%S')
        prev_established = established
        writer.writerow([as_encoded_str(item[k], errors='replace')
                         for k in EXPORT_REPORT_FIELDS])
    return response
def trafficlog_report_worker(results, request_params):
    """Background worker that collects traffic-log entries as CSV lines.

    results: accumulator dict from the previous batch, or None on the
    first call (initialized here with the CSV header row).
    request_params: dict with 'start_date' and 'end_date' ('YYYY-MM-DD'
    strings), 'type', and 'underwriter'.

    Returns (finished, results); finished is True once a batch is empty.
    """
    fields = ['readtime', 'dow', 'slot_time', 'underwriter',
              'title', 'type', 'excerpt']
    if results is None:
        # when starting the job, init file lines with the header row...
        results = {
            "file_lines": [
                ",".join(fields) + "\n"
            ],
            'last_offset': 0
        }

    # Each invocation handles the next batch of 50 entries.
    offset = results['last_offset']
    last_offset = offset+50
    results['last_offset'] = last_offset

    def mkdt(dt_string):
        # Parse a 'YYYY-MM-DD' string into a datetime at midnight.
        parts = [int(p) for p in dt_string.split("-")]
        return datetime.datetime(*parts)

    # The end date is inclusive, hence the one-day pad on the upper bound.
    query = (models.TrafficLogEntry.all()
                .filter('log_date >=', mkdt(request_params['start_date']))
                .filter('log_date <',
                        mkdt(request_params['end_date']) +
                        datetime.timedelta(days=1))
    )
    if request_params['type']:
        index = constants.SPOT_TYPE_CHOICES.index(request_params['type'])
        if index > 0:
            # -1 = not found, 0 = ALL
            spots = models.Spot.all().filter(
                        'type =', constants.SPOT_TYPE_CHOICES[index])
            query = query.filter('spot IN', list(spots))
    # TODO(Kumar) figure out why this doesn't work!
    # if request_params['underwriter']:
    #     copies = models.SpotCopy.all().filter('underwriter =',
    #                                           request_params['underwriter'])
    #     query = query.filter('spot_copy IN', list(copies))

    all_entries = query[ offset: last_offset ]
    if len(all_entries) == 0:
        finished = True
    else:
        finished = False

    for entry in all_entries:
        # TODO(Kumar) - see above
        if request_params['underwriter']:
            # Filter in Python while the datastore filter above is disabled.
            if entry.spot_copy.underwriter != request_params['underwriter']:
                continue
        buf = StringIO()
        writer = csv.DictWriter(buf, fields)
        row = report_entry_to_csv_dict(entry)
        for k, v in row.items():
            # Normalize every value to a utf-8 byte string for the CSV.
            row[k] = as_encoded_str(v, encoding='utf8')
        writer.writerow(row)
        results['file_lines'].append(buf.getvalue())

    return finished, results
Example #7
0
def play_count_key(item):
    """Build the comma-joined, lowercased key used to tally plays.

    For example: Talking Book,Stevie Wonder,Motown
    """
    return ','.join(
        as_encoded_str(_get_entity_attr(item, field, '')).lower()
        for field in ('album_title', 'artist_name', 'label'))
Example #8
0
    def item_key(item):
        # Build a comma-joined, lowercased key from the configured fields.
        parts = []
        for field in fields:
            value = as_encoded_str(_get_entity_attr(item, field, ""))
            # the attribute may exist but hold None; treat that as empty
            if value is None:
                value = ""
            parts.append(value.lower())
        return ",".join(parts)
def play_count_key(item):
    """Return the tally key for a playlist record.

    The key is the lowercased album title, artist name and label joined
    by commas, e.g.: Talking Book,Stevie Wonder,Motown
    """
    fields = ['album_title', 'artist_name', 'label']
    stubs = [as_encoded_str(_get_entity_attr(item, f, '')) for f in fields]
    return ','.join(s.lower() for s in stubs)
Example #10
0
def trafficlog_report_worker(results, request_params):
    """Background worker that collects traffic-log entries as CSV lines.

    results: accumulator dict from the previous batch, or None on the
    first call (initialized here with the CSV header row).
    request_params: dict with 'start_date' and 'end_date' ('YYYY-MM-DD'
    strings), 'type', and 'underwriter'.

    Returns (finished, results); finished is True once a batch is empty.
    """
    fields = [
        'readtime', 'dow', 'slot_time', 'underwriter', 'title', 'type',
        'excerpt'
    ]
    if results is None:
        # when starting the job, init file lines with the header row...
        results = {"file_lines": [",".join(fields) + "\n"], 'last_offset': 0}

    # Each invocation handles the next batch of 50 entries.
    offset = results['last_offset']
    last_offset = offset + 50
    results['last_offset'] = last_offset

    def mkdt(dt_string):
        # Parse a 'YYYY-MM-DD' string into a datetime at midnight.
        parts = [int(p) for p in dt_string.split("-")]
        return datetime.datetime(*parts)

    # The end date is inclusive, hence the one-day pad on the upper bound.
    query = (models.TrafficLogEntry.all().filter(
        'log_date >=', mkdt(request_params['start_date'])).filter(
            'log_date <',
            mkdt(request_params['end_date']) + datetime.timedelta(days=1)))
    if request_params['type']:
        index = constants.SPOT_TYPE_CHOICES.index(request_params['type'])
        if index > 0:
            # -1 = not found, 0 = ALL
            spots = models.Spot.all().filter(
                'type =', constants.SPOT_TYPE_CHOICES[index])
            query = query.filter('spot IN', list(spots))
    # TODO(Kumar) figure out why this doesn't work!
    # if request_params['underwriter']:
    #     copies = models.SpotCopy.all().filter('underwriter =',
    #                                           request_params['underwriter'])
    #     query = query.filter('spot_copy IN', list(copies))

    all_entries = query[offset:last_offset]
    if len(all_entries) == 0:
        finished = True
    else:
        finished = False

    for entry in all_entries:
        # TODO(Kumar) - see above
        if request_params['underwriter']:
            # Filter in Python while the datastore filter above is disabled.
            if entry.spot_copy.underwriter != request_params['underwriter']:
                continue
        buf = StringIO()
        writer = csv.DictWriter(buf, fields)
        row = report_entry_to_csv_dict(entry)
        for k, v in row.items():
            # Normalize every value to a utf-8 byte string for the CSV.
            row[k] = as_encoded_str(v, encoding='utf8')
        writer.writerow(row)
        results['file_lines'].append(buf.getvalue())

    return finished, results
Example #11
0
    def item_key(item):
        # Join the lowercased values of the configured fields with commas.
        pieces = []
        for name in fields:
            piece = as_encoded_str(_get_entity_attr(item, name, ''))
            # guard against attributes that exist but are set to None
            pieces.append('' if piece is None else piece.lower())
        return ','.join(pieces)
def playlist_report_product(results):
    """Render the accumulated play counts as a CSV attachment.

    Expects ``results`` as produced by playlist_report_worker (items and
    play_counts keyed by play key).  Returns an HttpResponse carrying
    the CSV file.
    """
    fname = "chirp-play-count_%s_%s" % (results['from_date'],
                                        results['to_date'])
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = "attachment; filename=%s.csv" % (fname)
    # csv.writer()'s second positional argument is the *dialect*; passing
    # REPORT_FIELDS there was a silent no-op in Python 2 (and a TypeError
    # in Python 3), so it has been dropped.
    writer = csv.writer(response)
    writer.writerow(REPORT_FIELDS)
    for play_key, item in results['items'].iteritems():
        item['from_date'] = results['from_date']
        item['to_date'] = results['to_date']
        item['play_count'] = results['play_counts'][play_key]
        writer.writerow([as_encoded_str(item[k], errors='replace')
                         for k in REPORT_FIELDS])
    return response
Example #13
0
def playlist_report_product(results):
    """Render the accumulated play counts as a CSV attachment.

    Expects ``results`` as produced by playlist_report_worker (items and
    play_counts keyed by play key).  Returns an HttpResponse carrying
    the CSV file.
    """
    fname = "chirp-play-count_%s_%s" % (results['from_date'],
                                        results['to_date'])
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = "attachment; filename=%s.csv" % (fname)
    # csv.writer()'s second positional argument is the *dialect*; passing
    # REPORT_FIELDS there was a silent no-op in Python 2 (and a TypeError
    # in Python 3), so it has been dropped.
    writer = csv.writer(response)
    writer.writerow(REPORT_FIELDS)
    for play_key, item in results['items'].iteritems():
        item['from_date'] = results['from_date']
        item['to_date'] = results['to_date']
        item['play_count'] = results['play_counts'][play_key]
        writer.writerow(
            [as_encoded_str(item[k], errors='replace') for k in REPORT_FIELDS])
    return response
Example #14
0
def playlist_report_export_product(results):
    """Render the accumulated export-report items as a tab-separated
    attachment.

    Expects ``results`` as produced by playlist_export_report_worker
    (items keyed by established datetime, plus from_date/to_date).
    Returns an HttpResponse carrying the tab-delimited file.
    """
    fname = "chirp-export-report_%s_%s" % (results['from_date'],
                                           results['to_date'])
    response = HttpResponse(content_type='text/csv; charset=utf-8')
    response['Content-Disposition'] = "attachment; filename=%s.txt" % (fname)
    # csv.writer()'s second positional argument is the *dialect*; passing
    # EXPORT_REPORT_FIELDS there was a silent no-op in Python 2 (and a
    # TypeError in Python 3), so it has been dropped.
    writer = csv.writer(response, delimiter='\t')
    writer.writerow(EXPORT_REPORT_FIELDS)
    # sort on date and time, newest first
    sorted_keys = sorted(results['items'], reverse=True)
    # construct start and end times
    prev_established = None
    for key in sorted_keys:
        item = results['items'][key]
        established = datetime.strptime(item['established'],
                                        '%Y-%m-%d %H:%M:%S')
        if item['is_break']:
            prev_established = established
            continue
        item['start_time'] = established.strftime('%H:%M:%S')
        # calculate end times; keys are descending, so prev_established
        # is the event that followed this one
        if prev_established and prev_established.date() == established.date():
            item['end_time'] = (prev_established -
                                timedelta(seconds=1)).strftime('%H:%M:%S')
        else:
            # track is last played for the day
            if item['duration_ms']:
                # NOTE(review): the worker stores duration_ms through
                # as_encoded_str; timedelta needs a number here — confirm
                # as_encoded_str passes ints through unchanged.
                delta = timedelta(milliseconds=item['duration_ms'])
                item['end_time'] = (established + delta).strftime('%H:%M:%S')
            else:
                # no track duration, default to 4 minutes
                item['end_time'] = (established +
                                    timedelta(minutes=4)).strftime('%H:%M:%S')
        prev_established = established
        writer.writerow([
            as_encoded_str(item[k], errors='replace')
            for k in EXPORT_REPORT_FIELDS
        ])
    return response
Example #15
0
 def test_encode_unicode(self):
     """as_encoded_str should encode unicode to utf-8 by default, or to
     an explicitly requested encoding such as utf-16 (note the BOM in
     the expected utf-16 byte string)."""
     self.assertEqual(utilities.as_encoded_str(u"Ivan Krsti\u0107"), "Ivan Krsti\xc4\x87")
     self.assertEqual(
         utilities.as_encoded_str(u"Ivan Krsti\u0107", encoding="utf-16"),
         "\xff\xfeI\x00v\x00a\x00n\x00 \x00K\x00r\x00s\x00t\x00i\x00\x07\x01",
     )
Example #16
0
def send_track_to_live365(request):
    """
    Background Task URL to send playlist to Live 365 service.

    This view expects POST parameters:

    **id**
    The Datastore key of the playlist entry

    When POSTing to Live 365 here are the parameters:

    **member_name**
    Live365 member name

    **password**
    Live365 password

    **sessionid**
    Unused.  This is an alternative to user password and looks like
    membername:sessionkey as returned by api_login.cgi

    **version**
    Version of API request.  Currently this must be 2

    **filename**
    I think we can leave this blank because Live365 docs say they
    will use it to guess song and artist info if none was sent.

    **seconds**
    Length of the track in seconds.  Live365 uses this to refresh its
    popup player window thing.  So really we should probably set this to 60 or 120
    because DJs might be submitting playlist entries out of sync with when
    they are actually playing the songs.

    **title**
    Song title

    **artist**
    Artist name

    **album**
    Album title
    """
    track = AutoRetry(PlaylistEvent).get(request.POST['id'])
    if not track:
        log.warning("Requested to create a non-existant track of ID %r" % request.POST['id'])
        # this is not an error (malicious POST, etc), so make sure the task succeeds:
        return task_response({'success':True})

    log.info("Live365 create track %s" % track.key())

    # Track metadata is encoded to latin-1 with errors="ignore", silently
    # dropping any characters the Live365 API cannot accept.
    qs = {
        'member_name': dbconfig['live365.member_name'],
        'password': dbconfig['live365.password'],
        'version': 2,
        'seconds': 30,
        'title': as_encoded_str(track.track_title, encoding='latin-1', errors="ignore"),
        'artist': as_encoded_str(track.artist_name, encoding='latin-1', errors="ignore"),
        'album': as_encoded_str(track.album_title, encoding='latin-1', errors="ignore")
    }
    data = urllib.urlencode(qs)
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    # in prod: http://www.live365.com/cgi-bin/add_song.cgi
    service_url = dbconfig['live365.service_url']
    result = _fetch_url(url=service_url, method='POST', data=data, headers=headers)
    return task_response(result)
Example #17
0
def playlist_export_report_worker(results, request_params):
    """Background worker that pages through playlist events in a date
    range and accumulates them into ``results['items']`` for the export
    report.

    Args:
        results: accumulator dict from the previous batch, or None on
            the first call (the accumulator is initialized here).
        request_params: form data containing 'from_date' and 'to_date'.

    Returns:
        (finished, results) where finished is True once a batch comes
        back empty.

    Raises:
        ValueError: if the submitted form data is invalid.
    """
    form = PlaylistReportForm(data=request_params)
    if not form.is_valid():
        # TODO(Kumar) make this visible to the user
        raise ValueError('Invalid PlaylistReportForm')
    from_date = form.cleaned_data['from_date']
    to_date = form.cleaned_data['to_date']

    if results is None:
        # when starting the job, init the accumulator
        results = {
            'items': {},  # items keyed by datetime established
            'last_offset': 0,
            'from_date': str(from_date),
            'to_date': str(to_date),
        }

    # Each invocation handles the next batch of 50 entries.
    offset = results['last_offset']
    last_offset = offset + 50
    results['last_offset'] = last_offset

    query = filter_playlist_events_by_date_range(from_date, to_date)
    all_entries = query[offset:last_offset]

    # An empty batch means the whole query has been consumed.
    finished = len(all_entries) == 0

    for entry in all_entries:
        established = _get_entity_attr(entry, 'established_display')
        report_key = as_encoded_str(str(established))

        # isinstance() instead of an exact type comparison so that
        # subclasses of PlaylistBreak are also treated as breaks.
        if isinstance(entry, PlaylistBreak):
            results['items'][report_key] = {
                'established':
                as_encoded_str(established.strftime('%Y-%m-%d %H:%M:%S')),
                'is_break':
                True
            }
            continue

        playlist = _get_entity_attr(entry, 'playlist')
        track = _get_entity_attr(entry, 'track')
        results['items'][report_key] = {
            'channel':
            as_encoded_str(_get_entity_attr(playlist, 'channel')),
            'date':
            as_encoded_str(established.strftime("%m/%d/%y")),
            'duration_ms':
            as_encoded_str(_get_entity_attr(track, 'duration_ms', 0)),
            'established':
            as_encoded_str(established.strftime('%Y-%m-%d %H:%M:%S')),
            'artist_name':
            as_encoded_str(_get_entity_attr(entry, 'artist_name')),
            'track_title':
            as_encoded_str(_get_entity_attr(entry, 'track_title')),
            'album_title':
            as_encoded_str(_get_entity_attr(entry, 'album_title_display')),
            'label':
            as_encoded_str(_get_entity_attr(entry, 'label_display')),
            'is_break':
            False
        }

    return finished, results
Example #18
0
def send_track_to_live365(request):
    """
    Background Task URL to send playlist to Live 365 service.

    This view expects POST parameters:

    **id**
    The Datastore key of the playlist entry

    When POSTing to Live 365 here are the parameters:

    **member_name**
    Live365 member name

    **password**
    Live365 password

    **sessionid**
    Unused.  This is an alternative to user password and looks like
    membername:sessionkey as returned by api_login.cgi

    **version**
    Version of API request.  Currently this must be 2

    **filename**
    I think we can leave this blank because Live365 docs say they
    will use it to guess song and artist info if none was sent.

    **seconds**
    Length of the track in seconds.  Live365 uses this to refresh its
    popup player window thing.  So really we should probably set this to 60 or 120
    because DJs might be submitting playlist entries out of sync with when
    they are actually playing the songs.

    **title**
    Song title

    **artist**
    Artist name

    **album**
    Album title
    """
    track = AutoRetry(PlaylistEvent).get(request.POST['id'])
    if not track:
        log.warning("Requested to create a non-existant track of ID %r" %
                    request.POST['id'])
        # this is not an error (malicious POST, etc), so make sure the task succeeds:
        return task_response({'success': True})

    log.info("Live365 create track %s" % track.key())

    # Track metadata is encoded to latin-1 with errors="ignore", silently
    # dropping any characters the Live365 API cannot accept.
    qs = {
        'member_name':
        dbconfig['live365.member_name'],
        'password':
        dbconfig['live365.password'],
        'version':
        2,
        'seconds':
        30,
        'title':
        as_encoded_str(track.track_title, encoding='latin-1', errors="ignore"),
        'artist':
        as_encoded_str(track.artist_name, encoding='latin-1', errors="ignore"),
        'album':
        as_encoded_str(track.album_title, encoding='latin-1', errors="ignore")
    }
    data = urllib.urlencode(qs)
    headers = {"Content-type": "application/x-www-form-urlencoded"}
    # in prod: http://www.live365.com/cgi-bin/add_song.cgi
    service_url = dbconfig['live365.service_url']
    result = _fetch_url(url=service_url,
                        method='POST',
                        data=data,
                        headers=headers)
    return task_response(result)
 def test_encode_unicode(self):
     """as_encoded_str should encode unicode to utf-8 by default, or to
     an explicitly requested encoding such as utf-16 (note the BOM in
     the expected utf-16 byte string)."""
     self.assertEqual(utilities.as_encoded_str(u'Ivan Krsti\u0107'),
                         'Ivan Krsti\xc4\x87')
     self.assertEqual(utilities.as_encoded_str(u'Ivan Krsti\u0107', encoding='utf-16'),
                         '\xff\xfeI\x00v\x00a\x00n\x00 \x00K\x00r\x00s\x00t\x00i\x00\x07\x01')
Example #20
0
 def test_encode_unicode_with_error_mode(self):
     """errors='replace' should substitute '?' for characters that the
     target encoding (ascii here) cannot represent."""
     self.assertEqual(
         utilities.as_encoded_str(u"Ivan Krsti\u0107", encoding="ascii", errors="replace"), "Ivan Krsti?"
     )
Example #21
0
 def test_passthru_encoded_str(self):
     """An already-encoded byte string should pass through unchanged."""
     self.assertEqual(utilities.as_encoded_str("Ivan Krsti\xc4\x87"), "Ivan Krsti\xc4\x87")
Example #22
0
 def test_encode_unicode_with_error_mode(self):
     """errors='replace' should substitute '?' for characters that the
     target encoding (ascii here) cannot represent."""
     self.assertEqual(utilities.as_encoded_str(u'Ivan Krsti\u0107',
                                                     encoding='ascii',
                                                     errors='replace'),
                                                 'Ivan Krsti?')
Example #23
0
 def test_passthru_encoded_str(self):
     """An already-encoded byte string should pass through unchanged."""
     self.assertEqual(utilities.as_encoded_str('Ivan Krsti\xc4\x87'), 'Ivan Krsti\xc4\x87')
 def test_encode_unicode_with_error_mode(self):
     """errors='replace' should substitute '?' for characters that the
     target encoding (ascii here) cannot represent."""
     self.assertEqual(utilities.as_encoded_str(u'Ivan Krsti\u0107',
                                                     encoding='ascii',
                                                     errors='replace'),
                                                 'Ivan Krsti?')