Ejemplo n.º 1
0
def make_tmpdb(request):
    """Create (or reuse) a TemporaryDatabase for a set of segment IDs.

    If a temporary database with the same id-checksum already exists for
    this user, its name is returned instead of creating a duplicate.

    :param request: must contain 'ids' (comma-separated ints) and 'database'
    :return: standard response dict; payload has 'name' and 'created'
    """
    raw_ids = get_or_error(request.POST, 'ids')
    database = get_or_error(request.POST, 'database')
    ids = np.sort(np.array([int(x) for x in raw_ids.split(',')]))

    # The checksum identifies this exact (sorted) id set
    chksum = IdOrderedModel.calc_chksum(ids)
    existing = TemporaryDatabase.objects.filter(
        chksum=chksum, user=request.user).first()

    if existing is not None:
        payload = dict(name=existing.name, created=False)
    else:
        tmpdb = TemporaryDatabase(name=uuid.uuid4().hex,
                                  user=request.user,
                                  _databases=database)
        tmpdb.ids = ids
        tmpdb.save()
        payload = dict(name=tmpdb.name, created=True)

    return dict(origin='make_tmpdb',
                success=True,
                warning=None,
                payload=payload)
Ejemplo n.º 2
0
    def form_valid(self, form):
        """Handle a validated similarity-extraction form.

        Exactly one of 'ordination' / 'data_matrix' must be chosen, and a
        SimilarityIndex must not already exist for the chosen source.  On
        error the selection form is re-rendered with the errors; otherwise
        a SimilarityIndex plus a background Task are created and the task
        table partial is returned.

        :param form: validated form with 'ordination' and 'data_matrix'
        :return: HttpResponse wrapping a JSON message dict with 'success'
            and rendered 'html'
        """
        user = self.request.user
        form_data = form.cleaned_data
        ord_id = form_data.get('ordination', None)
        dm_id = form_data.get('data_matrix', None)

        has_error = False

        # Reject both-empty and both-given: exactly one source is required
        if (not ord_id and not dm_id) or (ord_id and dm_id):
            form.add_error(
                'ordination',
                'Either ordination or data matrix must be chosen, but not both'
            )
            form.add_error(
                'data_matrix',
                'Either ordination or data matrix must be chosen, but not both'
            )
            has_error = True

        if not has_error:
            if dm_id:
                dm = get_or_error(DataMatrix, dict(id=dm_id))
                # Only one SimilarityIndex per data matrix
                si = SimilarityIndex.objects.filter(dm=dm).first()
                if si is not None:
                    form.add_error('data_matrix', 'Already extracted')
                    has_error = True
                else:
                    si = SimilarityIndex(dm=dm)
            else:
                ord = get_or_error(Ordination, dict(id=ord_id))
                # Only one SimilarityIndex per ordination
                si = SimilarityIndex.objects.filter(ord=ord).first()
                if si is not None:
                    form.add_error('ordination', 'Already extracted')
                    has_error = True
                else:
                    si = SimilarityIndex(ord=ord, dm=ord.dm)

        if has_error:
            context = self.get_context_data()
            context['form'] = form
            rendered = render_to_string(
                'partials/similarity-selection-form.html', context=context)
            return HttpResponse(
                json.dumps(dict(message=dict(success=False, html=rendered))))

        # Save first so si.id exists for the task's target string
        si.save()
        task = Task(user=user,
                    target='{}:{}'.format(SimilarityIndex.__name__, si.id))
        task.save()
        si.task = task
        si.save()

        delay_in_production(calculate_similarity, task.id)

        context = self.get_context_data()
        context['task'] = task
        rendered = render_to_string(
            'partials/similarity-extraction-tasks.html', context=context)
        return HttpResponse(
            json.dumps(dict(message=dict(success=True, html=rendered))))
Ejemplo n.º 3
0
def change_tmpdb_name(request):
    """
    Rename one of the requesting user's temporary databases.
    :param request: must contain 'old-name' and 'new-name'
    :return: standard response dict with empty payload
    :raises CustomAssertionError: if the new name is invalid or already taken
    """
    old_name = get_or_error(request.POST, 'old-name')
    new_name = get_or_error(request.POST, 'new-name')

    if not re.match("^[a-zA-Z0-9_-]+$", new_name):
        raise CustomAssertionError(
            'Name can only contain alphabets, numbers, dashes and underscores')

    tmpdb = get_or_error(TemporaryDatabase,
                         dict(name=old_name, user=request.user))
    # Atomic so the uniqueness check and the rename cannot interleave with
    # a concurrent rename to the same name
    with transaction.atomic():
        if TemporaryDatabase.objects.filter(name=new_name,
                                            user=request.user).exists():
            raise CustomAssertionError(
                'Temporary database named {} already exists'.format(new_name))
        tmpdb.name = new_name
        tmpdb.save()

    return dict(origin='change_tmpdb_name',
                success=True,
                warning=None,
                payload=None)
    def post_init(self, options):
        """Validate CLI options and load the feature matrix to analyse.

        Exactly one of --dm-id / --ord-id must be supplied; the ordination
        path reuses the ordination's underlying data matrix.  Loads segment
        ids and raw features from the data matrix's binary files, z-scores
        the columns, and zeroes out inf/nan cells.

        :param options: parsed command-line options dict
        :raises Exception: when both or neither of dmid/ordid are given
        """
        super(Command, self).post_init(options)

        dmid = options['dmid']
        ordid = options['ordid']
        self.class_aggregation = options['class_aggregation']

        # True when both are None or both are set - either way invalid
        if (dmid is None) == (ordid is None):
            raise Exception(
                'Either but not both --dm-id and --ord-id should be given')

        if dmid:
            self.dm = get_or_error(DataMatrix, dict(id=dmid))
            self.ord = None
        else:
            self.ord = get_or_error(Ordination, dict(id=ordid))
            self.dm = self.ord.dm

        sids_path = self.dm.get_sids_path()
        source_bytes_path = self.dm.get_bytes_path()

        self.sids = bytes_to_ndarray(sids_path, np.int32)
        self.tids = get_tids(self.sids)
        coordinates = get_rawdata_from_binary(source_bytes_path,
                                              len(self.sids))
        # Normalise the features, then replace inf/nan (e.g. left by
        # z-scoring constant columns) with 0 so downstream maths is safe
        coordinates = drop_useless_columns(coordinates)
        coordinates = zscore(coordinates)
        coordinates[np.where(np.isinf(coordinates))] = 0
        coordinates[np.where(np.isnan(coordinates))] = 0
        self.coordinates = coordinates
Ejemplo n.º 5
0
    def get_context_data(self, **kwargs):
        """Add the URL of the tensor's config file to the template context."""
        ctx = super(TensorvizView, self).get_context_data(**kwargs)
        tensor = get_or_error(
            DerivedTensorData,
            dict(name=get_or_error(kwargs, 'tensor_name')))
        ctx['config_file'] = '/' + tensor.get_config_path()
        return ctx
Ejemplo n.º 6
0
def request_database_access(request):
    """Record a request for ANNOTATE permission on a database.

    :param request: must contain 'database-id'
    :return: True on success
    :raises CustomAssertionError: if the user already holds, or already
        requested, an equal or greater permission
    """
    user = request.user
    database = get_or_error(
        Database, dict(id=get_or_error(request.POST, 'database-id')))

    wanted = DatabasePermission.ANNOTATE

    # Already holding the permission (or better) makes the request moot
    if DatabaseAssignment.objects.filter(
            user=user, database=database, permission__gte=wanted).exists():
        raise CustomAssertionError(
            'You\'re already granted equal or greater permission.')

    access_request = AccessRequest.objects.filter(user=user,
                                                  database=database).first()
    if access_request and access_request.permission >= wanted:
        raise CustomAssertionError(
            'You\'ve already requested equal or greater permission.')

    # Reuse a lower-permission request if one exists, else create one
    if access_request is None:
        access_request = AccessRequest(user=user, database=database)
    access_request.permission = wanted
    access_request.save()
    return True
Ejemplo n.º 7
0
def delete_audio_files(request):
    """
    Delete audio files given ids. Also remove all existing audio files.
    :param request: must contain a list of ids and the id of the database where these files come from
    :return: True on success
    :raises CustomAssertionError: if any requested id doesn't belong to the database
    """
    user = request.user
    ids = json.loads(get_or_error(request.POST, 'ids'))
    database_id = get_or_error(request.POST, 'database')
    database = get_or_error(Database, dict(id=database_id))
    assert_permission(user, database, DatabasePermission.DELETE_FILES)

    # Check that the ids to delete actually come from this database.
    # Materialise the existing ids into a set once: O(1) membership tests
    # and a single queryset evaluation instead of per-id checks against
    # the lazy values_list queryset (which was O(n) per test).
    audio_files = AudioFile.objects.filter(id__in=ids)
    audio_files_ids = set(audio_files.values_list('id', flat=True))

    non_existent_ids = [x for x in ids if x not in audio_files_ids]

    if non_existent_ids:
        raise CustomAssertionError(
            'You\'re trying to delete files that don\'t belong to database {}. '
            'Are you messing with Javascript?'.format(database.name))

    segments = Segment.objects.filter(audio_file__in=audio_files)

    # Soft-delete: mark inactive now, clean up asynchronously
    segments.update(active=False)
    audio_files.update(active=False)

    delay_in_production(delete_segments_async)
    delay_in_production(delete_audio_files_async)

    return True
Ejemplo n.º 8
0
def add_collaborator(request):
    """Grant VIEW permission on a database to another user.

    The target user may be identified by username or email address
    (both case-insensitive).

    :param request: must contain 'user' and 'database'
    :return: the rendered table row for the new assignment
    :raises CustomAssertionError: if the user is unknown or already assigned
    """
    you = request.user
    who = get_or_error(request.POST, 'user')
    database = get_or_error(
        Database, dict(id=get_or_error(request.POST, 'database')))

    assert_permission(you, database, DatabasePermission.ASSIGN_USER)

    matches = Q(username__iexact=who) | Q(email__iexact=who)
    user = User.objects.filter(matches).first()
    if user is None:
        raise CustomAssertionError('This user doesn\'t exist.')

    if DatabaseAssignment.objects.filter(user=user,
                                         database=database).exists():
        raise CustomAssertionError(
            'User\'s already been granted access. You can change their permission in the table.'
        )

    # New collaborators start with the lowest permission (VIEW)
    assignment = DatabaseAssignment(user=user,
                                    database=database,
                                    permission=DatabasePermission.VIEW)
    assignment.save()

    _, rows = bulk_get_database_assignment([assignment],
                                           DotMap(database=database.id))
    return rows[0]
Ejemplo n.º 9
0
def remove_collaborators(request):
    """Remove collaborator assignments from a database.

    Refuses to remove yourself, other admins, or assignments that do not
    belong to the given database.

    :param request: must contain 'ids' (JSON list) and 'database'
    :return: True on success
    """
    you = request.user
    assignment_ids = json.loads(get_or_error(request.POST, 'ids'))
    database = get_or_error(
        Database, dict(id=get_or_error(request.POST, 'database')))
    assignments = DatabaseAssignment.objects.filter(id__in=assignment_ids,
                                                    database=database)

    assert_permission(you, database, DatabasePermission.ASSIGN_USER)

    # Every requested id must resolve to an assignment of THIS database
    if len(assignments) != len(assignment_ids):
        raise CustomAssertionError(
            'ERROR: one or more collaborators are not assigned to this database.'
        )

    if assignments.filter(user=you).exists():
        raise CustomAssertionError('ERROR: you can\'t remove yourself.')

    if assignments.filter(
            permission=DatabasePermission.ASSIGN_USER).exists():
        raise CustomAssertionError(
            'ERROR: you can\'t remove other admins of this database.')

    assignments.delete()
    return True
Ejemplo n.º 10
0
def save_history(request):
    """
    Save a copy of all ExtraAttrValue (labels, notes, ...) in a HistoryEntry
    :param request: must specify database, a comment to store with this copy,
        and type ('labels' or 'segmentation')
    :return: the rendered table row of the new HistoryEntry
    """
    # Backup-format version, stored in the zip's meta.json
    version = 4
    user = request.user

    comment = get_or_error(request.POST, 'comment')
    database_id = get_or_error(request.POST, 'database')
    backup_type = get_or_error(request.POST, 'type')

    database = get_or_error(Database, dict(id=database_id))
    assert_permission(user, database, DatabasePermission.VIEW)
    assert_values(backup_type, ['labels', 'segmentation'])

    meta = dict(database=database_id,
                user=user.id,
                time=timezone.now(),
                version=version,
                note=comment,
                type=backup_type)

    # Build the zip entirely in memory, then write it to disk in one go
    zip_buffer = io.BytesIO()
    with zipfile.ZipFile(zip_buffer, "a", zipfile.ZIP_BZIP2,
                         False) as zip_file:
        zip_file.writestr('meta.json', json.dumps(meta))
        zip_file.writestr('root.extraattrvalue.json',
                          'here for checking purpose')

        if backup_type == 'labels':
            save_label_history(database, user, zip_file)
        else:
            save_segmentation_history(database, user, zip_file)

    binary_content = zip_buffer.getvalue()

    he = HistoryEntry.objects.create(user=user,
                                     time=timezone.now(),
                                     database=database,
                                     version=version,
                                     note=comment,
                                     type=backup_type)
    filename = he.filename
    filepath = history_path(filename)
    ensure_parent_folder_exists(filepath)

    with open(filepath, 'wb') as f:
        f.write(binary_content)

    # Render timestamps in the client's timezone.  NOTE(review): raises
    # KeyError if the front-end never stored 'detected_tz' in the session
    tz_offset = request.session['detected_tz']
    tz = offset_to_timezone(tz_offset)

    _, rows = bulk_get_history_entries([he],
                                       DotMap(user=user,
                                              database=database_id,
                                              tz=tz))
    return rows[0]
Ejemplo n.º 11
0
def get_segment_audio_data(request):
    """
    Return a playable audio segment given the segment id
    :param request: must specify segment-id, this is the ID of a Segment object to be played
    :return: a binary blob specified as audio/ogg (or whatever the format is), playable and volume set to -10dB
    """
    user = request.user
    segment = get_or_error(
        Segment, dict(id=get_or_error(request.POST, 'segment-id')))
    audio_file = segment.audio_file
    assert_permission(user, audio_file.database, DatabasePermission.VIEW)

    # Non-original files point back to their source - read the source's
    # name and database, but keep this file's sample rate
    if audio_file.is_original():
        source = audio_file
    else:
        source = audio_file.original

    return _cached_get_segment_audio_data(source.name,
                                          source.database.id,
                                          audio_file.fs,
                                          segment.start_time_ms,
                                          segment.end_time_ms)
Ejemplo n.º 12
0
def get_audio_file_url(request):
    """Return an audio file's compressed URL and its real sample rate.

    'real-fs' is None whenever the browser can trust the sample rate
    embedded in the compressed file itself.
    """
    user = request.user
    audio_file = get_or_error(
        AudioFile, dict(id=get_or_error(request.POST, 'file-id')))
    assert_permission(user, audio_file.database, DatabasePermission.VIEW)

    # The audio file might have a faked sample rate - the only rate the
    # browser can see - and compression (e.g. wav 44100Hz -> mp3 48000Hz)
    # can change the real rate again.  Send the real fs explicitly when the
    # rate was faked or exceeds 48000; otherwise return None so the browser
    # uses the rate embedded in the file.
    needs_real_fs = audio_file.fake_fs is not None or audio_file.fs > 48000
    real_fs = audio_file.fs if needs_real_fs else None

    url = audio_path(audio_file,
                     settings.AUDIO_COMPRESSED_FORMAT,
                     for_url=True)
    return {'url': url, 'real-fs': real_fs}
Ejemplo n.º 13
0
def get_audio_file_url(request):
    """Return the compressed-format URL of an audio file the user may view."""
    audio_file = get_or_error(
        AudioFile, dict(id=get_or_error(request.POST, 'file-id')))
    assert_permission(request.user, audio_file.database,
                      DatabasePermission.VIEW)
    return audio_path(audio_file, settings.AUDIO_COMPRESSED_FORMAT,
                      for_url=True)
Ejemplo n.º 14
0
    def handle(self, database_name, username, *args, **options):
        """Export a database's segment-info table to an Excel workbook.

        Builds a 'Syllable' sheet whose columns mirror the 'segment-info'
        table definition (skipping names starting with '_'), writes one
        row per segment (spectrogram cells become embedded images), sizes
        the columns and saves export_koe_<database>.xlsx.
        """
        wb = openpyxl.Workbook()
        ws = wb.create_sheet('Syllable', 0)

        database = get_or_error(Database, dict(name__iexact=database_name))
        user = get_or_error(User, dict(username__iexact=username))

        table = tables['segment-info']
        column_names = []
        slugs = []
        col_max_widths = {}

        # Collect visible columns; seed each column's max content width
        # (keyed by slug) with the header length
        for column in table['columns']:
            column_name = column.get("name", None)
            if column_name is not None:
                if column_name.startswith('_'):
                    continue
                column_names.append(column_name)
                slug = column['slug']
                slugs.append(slug)
                col_max_widths[slug] = len(column_name)

        # Excel columns are 1-based
        slug_to_col_ind = {}
        for ind, slug in enumerate(slugs):
            slug_to_col_ind[slug] = ind + 1

        ws.append(column_names)
        # NOTE(review): this re-keys col_max_widths by display name while
        # the loop above keyed it by slug; the final width loop iterates
        # the mixed key set - verify widths land on the intended columns
        for col_name in column_names:
            col_max_widths[col_name] = len(col_name)

        segments_ids = Segment.objects.filter(audio_file__database=database).values_list('id', flat=True)
        ids, rows = bulk_get_segment_info(segments_ids, DotMap(dict(viewas=user, user=user, database=database.id)))

        excel_row_idx = 1
        # NOTE(review): only the first 2 rows are exported ([:2]) - looks
        # like a debugging leftover; confirm before relying on full exports
        for row in rows[:2]:
            excel_row_idx += 1
            for slug, val in row.items():
                excel_col_idx = slug_to_col_ind.get(slug, None)
                if excel_col_idx is None:
                    continue
                if slug == 'spectrogram':
                    # Spectrogram images are stored in pages of PAGE_CAPACITY
                    page = val // PAGE_CAPACITY
                    image_url = "user_data/spect/syllable/{}/{}.png".format(page, val)
                    width = syllable_spectrogram_handler(image_url, excel_row_idx, excel_col_idx, ws)
                else:
                    ws[get_cell_address(excel_col_idx, excel_row_idx)] = val
                    width = len(str(val))
                if col_max_widths[slug] < width:
                    col_max_widths[slug] = width

        for ind, slug in enumerate(col_max_widths):
            ws.column_dimensions[get_column_letter(ind + 1)].width = col_max_widths[slug]

        excel_filename = 'export_koe_{}.xlsx'.format(database_name)
        wb.save(excel_filename)
Ejemplo n.º 15
0
def get_label_options(request):
    """Return label/family/subfamily values and their usage counts.

    The segment population is determined by exactly one of three request
    parameters: a single audio file, a whole database, or a temporary
    database (collection).

    :param request: must contain file-id, database-id, or tmpdb-id
    :return: standard response dict whose payload maps each label class
        ('label', 'label_family', 'label_subfamily') to {value: count}
    :raises CustomAssertionError: if none of the three ids is given
    """
    file_id = request.POST.get('file-id', None)
    database_id = request.POST.get('database-id', None)
    tmpdb_id = request.POST.get('tmpdb-id', None)

    if file_id is None and database_id is None and tmpdb_id is None:
        raise CustomAssertionError('Need file-id or database-id or tmpdb-id')

    if file_id:
        audio_file = get_or_error(AudioFile, dict(id=file_id))
        database = audio_file.database
    elif database_id:
        database = get_or_error(Database, dict(id=database_id))
    else:
        database = get_or_error(TemporaryDatabase, dict(id=tmpdb_id))

    user = request.user

    if isinstance(database, Database):
        assert_permission(user, database, DatabasePermission.VIEW)
        sids = list(
            Segment.objects.filter(audio_file__database=database).values_list(
                'id', flat=True))
    else:
        # TemporaryDatabase stores its segment ids directly
        sids = database.ids

    label_attr = ExtraAttr.objects.get(klass=Segment.__name__, name='label')
    family_attr = ExtraAttr.objects.get(klass=Segment.__name__,
                                        name='label_family')
    subfamily_attr = ExtraAttr.objects.get(klass=Segment.__name__,
                                           name='label_subfamily')

    extra_attr_values = ExtraAttrValue.objects.filter(user=user,
                                                      owner_id__in=sids)

    def _value_counts(attr):
        # {value: number of segments carrying that value}
        pairs = extra_attr_values.filter(attr=attr).values_list(
            'value').annotate(c=Count('value'))
        return {value: count for value, count in pairs}

    retval = {
        'label': _value_counts(label_attr),
        'label_family': _value_counts(family_attr),
        'label_subfamily': _value_counts(subfamily_attr)
    }

    # Bug fix: origin previously said 'request_database_access' (copy-paste
    # error); every other handler in this file reports its own name.
    return dict(origin='get_label_options',
                success=True,
                warning=None,
                payload=retval)
Ejemplo n.º 16
0
def delete_database(request):
    """Soft-delete a database (mark inactive) and schedule async cleanup.

    :param request: must contain 'database-id'
    :return: standard response dict with empty payload
    """
    user = request.user
    database_id = get_or_error(request.POST, 'database-id')
    database = get_or_error(Database, dict(id=database_id))

    # Only admins (ASSIGN_USER holders) may delete a database
    assert_permission(user, database, DatabasePermission.ASSIGN_USER)

    database.active = False
    database.save()

    delay_in_production(delete_database_async)

    # Bug fix: this variant fell through with an implicit None return;
    # the identical handler elsewhere in this file returns the standard
    # response dict, so do the same here.
    return dict(origin='delete_database',
                success=True,
                warning=None,
                payload=None)
def get_data_matrix_config(request):
    """Return the feature/aggregation selections of a DataMatrix.

    The hashes are stored as '-'-joined id strings; decode them back into
    integer id lists.
    """
    dm = get_or_error(
        DataMatrix, dict(id=get_or_error(request.POST, 'data-matrix-id')))

    def _decode(hash_str):
        # '1-2-3' -> [1, 2, 3]
        return [int(x) for x in hash_str.split('-')]

    return dict(
        features=_decode(dm.features_hash),
        aggregations=_decode(dm.aggregations_hash),
        ndims=dm.ndims,
    )
Ejemplo n.º 18
0
    def prepare_data_for_analysis(self, pkl_filename, options):
        """Compute an average-linkage clustering tree of syllables and
        pickle it together with its supporting data.

        With an annotator given, distances are computed between label
        classes using the --class-distance-method aggregation; without
        one, every syllable is its own singleton class and plain
        euclidean pdist is used.

        :param pkl_filename: path to dump the result dict to
        :param options: dict with label_level, cdm, dmid, annotator_name
        :return: the dict that was pickled (tree, dbid, sids,
            unique_labels, classes_info)
        """
        label_level = options['label_level']
        cdm = options['cdm']
        dmid = options['dmid']
        annotator_name = options['annotator_name']

        methods = dict(mean=np.mean, median=np.median)
        method = get_or_error(
            methods, cdm,
            'Unknown value {} for --class-distance-method.'.format(cdm))
        dm = get_dm(dmid)
        sids_path = dm.get_sids_path()
        source_bytes_path = dm.get_bytes_path()

        sids = bytes_to_ndarray(sids_path, np.int32)
        coordinates = get_rawdata_from_binary(source_bytes_path, len(sids))
        # Clean the feature matrix: drop constant columns, z-score, and
        # zero out inf/nan cells left by the normalisation
        coordinates = drop_useless_columns(coordinates)
        coordinates = zscore(coordinates)
        coordinates[np.where(np.isinf(coordinates))] = 0
        coordinates[np.where(np.isnan(coordinates))] = 0

        if annotator_name is not None:
            annotator = get_or_error(User,
                                     dict(username__iexact=annotator_name))
            label_arr, syl_label_enum_arr = get_syllable_labels(
                annotator, label_level, sids)
            nlabels = len(label_arr)
            distmat, classes_info = calc_class_dist_by_syl_features(
                syl_label_enum_arr, nlabels, coordinates, method)
            dist_triu = mat2triu(distmat)
        else:
            # No annotator: each syllable forms its own class
            dist_triu = distance.pdist(coordinates, 'euclidean')
            label_arr = []
            syl_label_enum_arr = []
            classes_info = []
            for sind, sid in enumerate(sids):
                label = str(sind)
                label_arr.append(label)
                syl_label_enum_arr.append(sind)
                classes_info.append([sind])

        tree = linkage(dist_triu, method='average')

        saved_dict = dict(tree=tree,
                          dbid=dm.database.id,
                          sids=sids,
                          unique_labels=label_arr,
                          classes_info=classes_info)

        with open(pkl_filename, 'wb') as f:
            pickle.dump(saved_dict, f)

        return saved_dict
def set_preference(request):
    """Store a per-user preference key/value pair, creating the row if
    necessary and updating it when the value changed."""
    key = get_or_error(request.POST, 'key')
    value = get_or_error(request.POST, 'value')

    existing = Preference.objects.filter(user=request.user, key=key).first()
    if existing is None:
        existing = Preference(user=request.user, key=key, value=value)
        existing.save()

    # Only write again when the stored value actually differs
    if existing.value != value:
        existing.value = value
        existing.save()

    return True
Ejemplo n.º 20
0
def get_data_matrix_config(request):
    """Return the feature/aggregation selections of a DataMatrix.

    :param request: must contain 'data-matrix-id'
    :return: standard response dict; payload has 'features' and
        'aggregations' (integer id lists decoded from the stored
        '-'-joined hashes) plus 'ndims'
    """
    dm_id = get_or_error(request.POST, 'data-matrix-id')
    dm = get_or_error(DataMatrix, dict(id=dm_id))

    selections = dict(
        features=list(map(int, dm.features_hash.split('-'))),
        aggregations=list(map(int, dm.aggregations_hash.split('-'))),
        ndims=dm.ndims,
    )

    # Bug fix: origin previously said 'request_database_access' (copy-paste
    # error); every other handler in this file reports its own name.
    return dict(origin='get_data_matrix_config',
                success=True,
                warning=None,
                payload=selections)
Ejemplo n.º 21
0
def merge_audio_chunks(request):
    """
    This action should be called after the last audio chunk is uploaded.
    It will merge all the saved chunks (foo.wav__1, foo.wav__2, etc...) into foo.wav
    And import to the database
    :param request: must contain 'name', 'chunkCount' and 'database';
        may contain 'browser-fs'
    :return: standard response dict whose payload is the new file's row(s)
    """
    user = request.user
    params = request.POST
    name = params['name']
    chunk_count = int(params['chunkCount'])
    # Highest sample rate the uploading browser reported (0 = unknown)
    max_fs = int(request.POST.get('browser-fs', 0))

    # Strip the extension; it is re-appended when building the path below
    if name.lower().endswith('.wav'):
        name = name[:-4]

    database_id = get_or_error(request.POST, 'database')
    database = get_or_error(Database, dict(id=database_id))
    assert_permission(user, database, DatabasePermission.ADD_FILES)

    wav_file_path = data_path('audio/wav/{}'.format(database_id),
                              name + '.wav')

    # Concatenate the chunk files (foo.wav__0, foo.wav__1, ...) in order
    with open(wav_file_path, 'wb') as combined_file:
        for i in range(chunk_count):
            chunk_file_path = wav_file_path + '__' + str(i)
            with open(chunk_file_path, 'rb') as chunk_file:
                combined_file.write(chunk_file.read())

    size, comp, num_channels, fs, sbytes, block_align, bitrate, bytes, dtype = read_wav_info(
        wav_file_path)
    # comp == 3 means IEEE-float WAV; re-export as standard PCM WAV
    if comp == 3:
        warning('File is IEEE format. Convert to standard WAV')
        audio = pydub.AudioSegment.from_file(wav_file_path)
        audio.export(wav_file_path, format='wav')

    # NOTE(review): combined_file is already closed here (the 'with' above
    # has exited) - presumably the helper only uses its .name; confirm
    audio_file = _import_and_convert_audio_file(database, combined_file,
                                                max_fs)

    # Remove the now-merged chunk files
    for i in range(chunk_count):
        chunk_file_path = wav_file_path + '__' + str(i)
        os.remove(chunk_file_path)

    added_files = AudioFile.objects.filter(id=audio_file.id)
    _, rows = get_sequence_info_empty_songs(added_files)
    return dict(origin='merge_audio_chunks',
                success=True,
                warning=None,
                payload=rows)
Ejemplo n.º 22
0
def delete_segments(request):
    """Soft-delete segments of a database, then schedule async cleanup."""
    user = request.user
    segment_ids = json.loads(get_or_error(request.POST, 'ids'))
    database = get_or_error(
        Database, dict(id=get_or_error(request.POST, 'database-id')))
    assert_permission(user, database, DatabasePermission.MODIFY_SEGMENTS)

    # Mark inactive rather than deleting outright; the async task does
    # the real cleanup
    Segment.objects \
        .filter(id__in=segment_ids, audio_file__database=database) \
        .update(active=False)

    delay_in_production(delete_segments_async)

    return True
Ejemplo n.º 23
0
def get_datamatrix_file_paths(request):
    """Return the on-disk paths of a DataMatrix's binary files."""
    dm = get_or_error(DataMatrix,
                      dict(id=get_or_error(request.POST, 'dmid')))

    # A DataMatrix belongs to either a real Database or a TemporaryDatabase
    owner = dm.database if dm.database else dm.tmpdb

    payload = {
        'bytes-path': dm.get_bytes_path(),
        'sids-path': dm.get_sids_path(),
        'database-name': owner.name,
        'cols-path': dm.get_cols_path(),
    }
    return dict(origin='get_datamatrix_file_paths',
                success=True,
                warning=None,
                payload=payload)
Ejemplo n.º 24
0
def get_unsegmented_songs(request):
    """List names of audio files in a database that have no segments yet."""
    user = request.user
    database = get_or_error(
        Database, dict(id=get_or_error(request.POST, 'database-id')))
    assert_permission(user, database, DatabasePermission.MODIFY_SEGMENTS)

    all_names = set(
        AudioFile.objects.filter(database=database).values_list('name',
                                                                flat=True))
    segmented_names = set(
        Segment.objects.filter(audio_file__database=database)
        .values_list('audio_file__name', flat=True).distinct())

    # Files never segmented = all files minus those with at least 1 segment
    return list(all_names - segmented_names)
Ejemplo n.º 25
0
def delete_database(request):
    """Soft-delete a database (mark inactive) and schedule async cleanup."""
    user = request.user
    database = get_or_error(
        Database, dict(id=get_or_error(request.POST, 'database-id')))

    # Only admins (ASSIGN_USER holders) may delete a database
    assert_permission(user, database, DatabasePermission.ASSIGN_USER)

    database.active = False
    database.save()

    delay_in_production(delete_database_async)

    return dict(origin='delete_database',
                success=True,
                warning=None,
                payload=None)
def get_tensor_data_file_paths(request):
    """Return the sids/bytes file paths of a derived tensor.

    Falls back to the full tensor's bytes file when the derived tensor
    has no bytes file of its own.
    """
    tensor = get_or_error(
        DerivedTensorData,
        dict(name=get_or_error(request.POST, 'tensor-name')))

    bytes_path = tensor.get_bytes_path()
    if not os.path.isfile(bytes_path):
        bytes_path = tensor.full_tensor.get_bytes_path()

    return {
        'bytes-path': bytes_path,
        'sids-path': tensor.full_tensor.get_sids_path(),
        'database-name': tensor.database.name
    }
Ejemplo n.º 27
0
def create_database(request):
    """Create a new database, its audio folders, and make the requesting
    user its admin.

    The name must be unique (case-insensitive) and consist only of
    letters, digits, dashes and underscores.

    :param request: must contain 'name'
    :return: dict with the new database's id, name and permission string
    """
    user = request.user
    name = get_or_error(request.POST, 'name')

    if not re.match('^[a-zA-Z0-9_-]+$', name):
        raise CustomAssertionError(
            'Name can only contain alphabets, numbers, dashes and underscores')

    if Database.objects.filter(name__iexact=name).exists():
        raise CustomAssertionError(
            'Database with name {} already exists.'.format(name))

    database = Database(name=name)
    database.save()

    # Create the folders that will hold this database's audio files
    media_dir = settings.MEDIA_URL[1:]
    audio_root = os.path.join(settings.BASE_DIR, media_dir, 'audio')
    os.mkdir(os.path.join(audio_root, 'wav', str(database.id)))
    os.mkdir(
        os.path.join(audio_root, settings.AUDIO_COMPRESSED_FORMAT,
                     str(database.id)))

    # Now assign this database to this user, and switch the working database
    # to this new one
    assignment = DatabaseAssignment(user=user,
                                    database=database,
                                    permission=DatabasePermission.ASSIGN_USER)
    assignment.save()

    permission_str = DatabasePermission.get_name(
        DatabasePermission.ASSIGN_USER)
    return dict(id=database.id, name=name, permission=permission_str)
Ejemplo n.º 28
0
def get_database_spectrogram_preference(request):
    """Return the user's saved colour-map and zoom for a file's database.

    :param request: must contain 'file-id'
    :return: standard response dict; payload may contain 'cm' and/or 'zoom'
    """
    file_id = get_or_error(request.POST, 'file-id')
    user = request.user

    database_id = AudioFile.objects.get(id=file_id).database.id

    # NOTE(review): user__username=user passes a User object to a username
    # lookup - presumably relies on str(user) being the username; confirm
    cm = ExtraAttrValue.objects.filter(user__username=user,
                                       attr=settings.ATTRS.database.cm,
                                       owner_id=database_id).first()
    zoom = ExtraAttrValue.objects.filter(user__username=user,
                                         attr=settings.ATTRS.database.zoom,
                                         owner_id=database_id).first()

    retval = dict()

    # Only include keys whose preference actually exists
    if cm is not None:
        retval['cm'] = cm.value

    if zoom is not None:
        retval['zoom'] = int(zoom.value)

    return dict(origin='get_database_spectrogram_preference',
                success=True,
                warning=None,
                payload=retval)
Ejemplo n.º 29
0
def extract_syllables(database_name, spect_dir, format):
    """Export every segment of a database as a spectrogram image file.

    Files already present in spect_dir are skipped, so an interrupted
    export can be resumed.
    """
    database = get_or_error(Database, dict(name__iexact=database_name))
    segments = Segment.objects.filter(audio_file__database=database)

    extractor = extractors[format]

    # Group (id, start, end) triples by audio file so each wav is
    # located only once
    audio_file_dict = {}
    for seg in segments:
        audio_file_dict.setdefault(seg.audio_file, []).append(
            (seg.id, seg.start_time_ms, seg.end_time_ms))

    bar = Bar('Exporting segments ...', max=len(segments))

    for af, triples in audio_file_dict.items():
        wav_file_path = wav_path(af)
        fs = af.fs

        for sid, start, end in triples:
            spect_path = os.path.join(spect_dir,
                                      '{}.{}'.format(sid, format))
            if not os.path.isfile(spect_path):
                extractor(wav_file_path, fs, start, end, spect_path)
            bar.next()
    bar.finish()
Ejemplo n.º 30
0
def extra_syntax_context(request, context):
    """Populate the template context with the ordinations of the current
    database (or collection) and the selected ordination's paths."""
    database = context['current_database']
    viewas = context['viewas']

    if isinstance(database, Database):
        owner_filter = Q(dm__database=database)
        context['db_type'] = 'Database'
    else:
        owner_filter = Q(dm__tmpdb=database)
        context['db_type'] = 'Collection'

    # Only show ordinations whose task finished (or that have no task)
    finished = Q(task=None) | Q(task__stage=TaskProgressStage.COMPLETED)
    ordinations = Ordination.objects.filter(owner_filter & finished)

    ord_id = request.GET.get('ordination', None)
    if ord_id is None:
        current_ordination = ordinations.first()
    else:
        current_ordination = get_or_error(Ordination, dict(id=ord_id))

    context['current_ordination'] = current_ordination
    context['ordinations'] = ordinations

    if current_ordination:
        context['metadata_path'] = reverse(
            'ordination-meta',
            kwargs={
                'ord_id': current_ordination.id,
                'viewas': viewas.username
            })
        context['bytes_path'] = '/' + current_ordination.get_bytes_path()
    return context