Example #1
async def post_file(file: UploadFile, user: User) -> Union[str, None]:
    try:
        filename = Path(get_random_filename(with_subdirs=True))
        suffixes = Path(file.filename).suffixes
        if suffixes:
            # Reattach the original extension(s), e.g. ".tar.gz".
            filename = filename.with_name(filename.name + "".join(suffixes))
        relative_path = static_path() / filename
        absolute_path = project_root() / relative_path
        if not absolute_path.parent.is_dir():
            absolute_path.parent.mkdir(parents=True, exist_ok=True)

        # get file
        with open(str(absolute_path), "wb") as buffer:
            shutil.copyfileobj(file.file, buffer)
        url = f"{base_url()}/store/{filename}"
        # put record to db
        store_row = StoreModel(user_id=user.id,
                               email=user.email,
                               enterdate=datetime.now(),
                               filename=file.filename,
                               url=str(url),
                               path=str(relative_path))
        session = Session()
        session.add(store_row)
        session.commit()
        session.close()
        return url
    except Exception as e:
        print("exception", e)
        return None
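For context, here is a sketch of how post_file could be exposed as an upload route in the style of Example #4 below; the route path, response helpers, and dependency wiring mirror that example but are assumptions, not the project's confirmed API:

from fastapi import Depends, File, UploadFile

@store_router.post("/", response_description="File uploaded to the store")
async def upload_file_data(
        file: UploadFile = File(...),
        user: User = Depends(fastapi_users.get_current_active_user)
):
    # post_file swallows exceptions and returns None on failure.
    url = await post_file(file, user)
    if url is None:
        return ErrorResponseModel(
            "An error occurred",
            503,
            "Error while storing the file",
        )
    return ResponseModel(url, "File uploaded successfully.")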
Example #2
async def delete_file(url: str, user: User) -> bool:

    session = Session()
    store_row = session.query(StoreModel).filter_by(url=url,
                                                    user_id=user.id).first()
    # Close the session on every path, not only when a row is found.
    if store_row is None:
        session.close()
        return False
    absolute_path = project_root() / store_row.path
    absolute_path.unlink(missing_ok=True)
    session.delete(store_row)
    session.commit()
    session.close()
    return True
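If the project is on SQLAlchemy 1.4 or newer (an assumption), the session can also be used as a context manager, which guarantees the close on every exit path instead of relying on explicit calls:

async def delete_file(url: str, user: User) -> bool:
    # Variant of the example above; requires SQLAlchemy 1.4+ for the
    # Session context-manager protocol.
    with Session() as session:
        store_row = session.query(StoreModel).filter_by(
            url=url, user_id=user.id).first()
        if store_row is None:
            return False
        absolute_path = project_root() / store_row.path
        absolute_path.unlink(missing_ok=True)
        session.delete(store_row)
        session.commit()
        return True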
Example #3
import os
import random
from skimage import io
import numpy as np
from typing import Tuple
from torch.utils.data import Dataset, DataLoader
import torch
from torchvision import transforms
import torchvision.transforms.functional as TF

from src.utils import project_root
from src.visualization_utils import show_img
from src.image_segmentation.utils import thirty_six_crop

PATH_DATA_WEIZMANN = os.path.join(project_root(), "data", "weizmann_horse")
PATH_SAVE_HORSE = os.path.join(project_root(), "saved_results",
                               "weizmann_horse")


class WeizmannHorseDataset(Dataset):
    def __init__(self,
                 img_dir: str,
                 mask_dir: str,
                 subset='train',
                 random_mirroring=True,
                 thirty_six_cropping=False):
        """
        Args:
            img_dir (string): Path to the directory of training images
            mask_dir (string): Path to the directory of segmentation masks
            subset (string): 'train', 'valid' or 'test'
            random_mirroring (bool): if True, randomly mirror image/mask
                pairs as data augmentation
            thirty_six_cropping (bool): if True, produce 36 crops per
                image via thirty_six_crop
        """
        self.img_dir = img_dir
        self.mask_dir = mask_dir
        self.subset = subset
        self.random_mirroring = random_mirroring
        self.thirty_six_cropping = thirty_six_cropping
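A torch Dataset is consumed through __len__ and __getitem__; the following is only an illustrative sketch of how they could look for paired image/mask folders, built from the imports shown above (the file layout, mask format, and 0.5 flip probability are all assumptions, not the project's actual code):

    def __len__(self):
        # Illustrative: one sample per image file in img_dir.
        return len(os.listdir(self.img_dir))

    def __getitem__(self, idx):
        # Illustrative: assumes each mask shares its image's file name.
        names = sorted(os.listdir(self.img_dir))
        img = io.imread(os.path.join(self.img_dir, names[idx]))
        mask = io.imread(os.path.join(self.mask_dir, names[idx]))
        img = TF.to_tensor(img)
        mask = torch.from_numpy(mask).float()
        if self.random_mirroring and random.random() < 0.5:
            img = TF.hflip(img)
            mask = TF.hflip(mask)
        return img, mask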
Example #4
@store_router.delete("/", response_description="Files deleted from the database")
async def delete_files_data(
        urls: List[str] = Body(...),
        user: User = Depends(fastapi_users.get_current_active_user)
):
    if not user.is_superuser:
        return ErrorResponseModel(
            "An error occurred",
            503,
            "User does not have admin privilegies",
        )
    else:
        results = []
        for url in urls:
            result = await delete_file(url, user)
            results.append(result)
        error_urls = results.count(False)
        success_urls = results.count(True)
        if error_urls > 0 and success_urls > 0:
            return ResponseModel(results, "Not all files deleted successfully!")
        elif success_urls == 0 and error_urls > 0:
            return ErrorResponseModel(
                "An error occurred",
                503,
                "Error while deleting files from the database",
            )
        elif error_urls == 0 and success_urls > 0:
            return ResponseModel(results, "Files deleted successfully.")
        else:
            # urls was empty: nothing failed, nothing was deleted.
            return ResponseModel(results, "No files to delete.")

static_files = StaticFiles(directory=str(project_root() / static_path()))
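The URLs built in Example #1 have the form {base_url()}/store/{filename}, so the StaticFiles instance above has to be mounted under /store for those URLs to resolve; a minimal wiring sketch (the app object and router registration are assumptions):

from fastapi import FastAPI

app = FastAPI()
# Serve the stored files under the /store prefix used by post_file's URLs.
app.mount("/store", static_files, name="store")
# Hypothetical registration of the router from Example #4.
app.include_router(store_router)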
Example #5
from torch.utils.data import DataLoader
import torch
import numpy as np
import os
from typing import Tuple

from src.image_tagging.model.conv_net import ConvNet
from src.utils import project_root
from src.image_tagging.flickr_dataset import (FlickrTaggingDataset,
                                              FlickrTaggingDatasetFeatures,
                                              inv_normalize, class_names)
from src.visualization_utils import show_grid_imgs

PATH_FLICKR_DATA = os.path.join(project_root(), "data", "flickr")
PATH_SAVE_FLICKR = os.path.join(project_root(), "saved_results", "flickr")

TRAIN_LABEL_FILE = os.path.join(PATH_SAVE_FLICKR, 'train_labels_1k.pt')
VAL_LABEL_FILE = os.path.join(PATH_SAVE_FLICKR, 'val_labels.pt')
TRAIN_SAVE_IMG_FILE = os.path.join(PATH_SAVE_FLICKR, 'train_imgs_1k.pt')
VAL_SAVE_IMG_FILE = os.path.join(PATH_SAVE_FLICKR, 'val_imgs.pt')

TRAIN_FEATURE_FILE = os.path.join(PATH_SAVE_FLICKR,
                                  'train_1k_features_20_epochs.pt')
VAL_FEATURE_FILE = os.path.join(PATH_SAVE_FLICKR, 'val_features_20_epochs.pt')

TRAIN_UNARY_FILE = os.path.join(PATH_SAVE_FLICKR, 'train_unary_20_epochs.pt')
VAL_UNARY_FILE = os.path.join(PATH_SAVE_FLICKR, 'val_unary_20_epochs.pt')


def load_train_dataset_flickr(
        path_data: str,
Example #6
import arff
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from sklearn.metrics import f1_score
import os
import random
from typing import Tuple

from src.model.base_model import BaseModel
from src.utils import project_root, MyDataset
from src.visualization_utils import plot_results

PATH_MODELS_ML_BIB = os.path.join(project_root(), "saved_results", "bibtex")
PATH_BIBTEX = os.path.join(project_root(), "data", "bibtex")


def get_bibtex(dir_path: str, use_train: bool):
    """
    Load the bibtex dataset.
    __author__ = "Michael Gygli, ETH Zurich"
    from https://github.com/gyglim/dvn/blob/master/mlc_datasets/__init__.py
    number of labels ("tags") = 159
    dimension of inputs = 1836
    Returns
    -------
    txt_labels (list)
        the 159 tags, e.g. 'TAG_system', 'TAG_social_nets'
    txt_inputs (list)
        the 1836 attribute words, e.g. 'dependent', 'always'