# Example 1
    def setup_method(self):
        """
        Prepare logger, params and the per-scene train/validation id lists
        before each test method runs.
        """
        Logger.init()
        Logger.remove_file_logger()

        self.params = DmdsParams()

        # fetch document ids per driving-stereo scene from the database
        Config.add_config('./config.ini')
        collection_details = ("local_mongodb", "depth", "driving_stereo")
        scenes = [
            "2018-10-26-15-24-18",
            "2018-10-19-09-30-39",
        ]

        self.train_data = []
        self.val_data = []
        self.collection_details = []

        # one (train, val) id split per scene, sorted by timestamp
        for scene_token in scenes:
            train_ids, val_ids = load_ids(
                collection_details,
                data_split=(80, 20),
                limit=100,
                shuffle_data=False,
                mongodb_filter={"scene_token": scene_token},
                sort_by={"timestamp": 1},
            )
            self.train_data.append(train_ids)
            self.val_data.append(val_ids)
            self.collection_details.append(collection_details)
# Example 2
    def setup_method(self):
        """
        Prepare logger, params and train/validation ids before each test.
        """
        Logger.init()
        Logger.remove_file_logger()

        # enable the extra regression heads these tests exercise
        self.params = CenternetParams(len(OD_CLASS_MAPPING))
        self.params.REGRESSION_FIELDS["l_shape"].active = True
        self.params.REGRESSION_FIELDS["3d_info"].active = True

        # fetch a limited set of document ids from the database
        Config.add_config('./config.ini')
        self.collection_details = ("local_mongodb", "labels", "nuscenes_train")

        self.train_data, self.val_data = load_ids(
            self.collection_details,
            data_split=(70, 30),
            limit=250,
        )
# Example 3
    def setup_method(self):
        """
        Prepare logger, semseg params and train/validation ids for each test.
        """
        Logger.init()
        Logger.remove_file_logger()

        self.params = SemsegParams()

        # fetch a small number of document ids from the comma10k collection
        Config.add_config('./config.ini')
        self.collection_details = ("local_mongodb", "labels", "comma10k")

        self.train_data, self.val_data = load_ids(
            self.collection_details,
            data_split=(70, 30),
            limit=30,
        )
# Example 4
    def setup_method(self):
        """
        Prepare logger, multitask params and shuffled train/validation ids.
        """
        Logger.init()
        Logger.remove_file_logger()

        self.params = MultitaskParams(len(OD_CLASS_MAPPING.items()))

        # fetch a shuffled subset of document ids from the database
        Config.add_config('./config.ini')
        self.collection_details = ("local_mongodb", "labels", "nuscenes_train")

        self.td, self.vd = load_ids(self.collection_details,
                                    data_split=(70, 30),
                                    shuffle_data=True,
                                    limit=30)
# Example 5
    def setup_method(self):
        """
        Prepare logger, tracker params and train/validation ids per test.
        """
        Logger.init()
        Logger.remove_file_logger()

        self.params = CentertrackerParams(len(OD_CLASS_MAPPING))

        # fetch some entries from the kitti labels collection
        Config.add_config('./config.ini')
        self.collection_details = ("local_mongodb", "labels", "kitti")

        self.train_data, self.val_data = load_ids(self.collection_details,
                                                  data_split=(70, 30),
                                                  limit=100)
# Example 6
def load_ids(
        col_details: Tuple[str, str, str],
        data_split: Tuple = (60, 40),
        sort_by: dict = None,
        limit: int = None,
        shuffle_data: bool = False,
        shuffle_steps: int = 1,
        mongodb_filter: dict = None):
    """
    Load MongoDB document ids from a collection and split them into a training and a validation set
    :param col_details: MongoDB collection details with a tuple of 3 string entries
                        [client name (from config), database name, collection name]
    :param data_split: Tuple of percentage of training and test data e.g. (60, 40) for 60% training and 40% test data
    :param sort_by: MongoDB sort expression, e.g. {"created_at": -1}; defaults to {"_id": 1}
    :param limit: maximum number of ids that should be fetched
    :param shuffle_data: determine if dataset should be shuffled before splitting it to train and validation data
    :param shuffle_steps: step size for the shuffling (e.g. for time series you want to have a shuffle_size of
                          BATCH_SIZE + (TIME_STEPS - 1))
    :param mongodb_filter: filter applied to the find() query; defaults to no filter
    :return: training and validation data (two lists of document ids)
    """
    Logger.logger.info("Loading Document IDs from MongoDB")
    mongo_con = MongoDBConnect()
    mongo_con.add_connections_from_config(Config.get_config_parser())
    collection = mongo_con.get_collection(*col_details)

    if sort_by is None:
        sort_by = {"_id": 1}
    if mongodb_filter is None:
        # BUG FIX: was a mutable default argument ({}), shared across calls
        mongodb_filter = {}

    # Only the _id field is used below, so project everything else away.
    # BUG FIX: sort_by used to be passed as find()'s second positional
    # argument, which is the *projection* in PyMongo — the cursor was never
    # actually sorted. Apply the sort explicitly instead.
    db_cursor = collection.find(mongodb_filter, {"_id": 1})
    db_cursor = db_cursor.sort(list(sort_by.items()))

    if limit:
        db_cursor.limit(limit)

    tmp_docs = [doc["_id"] for doc in db_cursor]

    if shuffle_data:
        if shuffle_steps == 1:
            shuffle(tmp_docs)
        else:
            # tmp_docs must be a multiple of shuffle_steps for the reshape,
            # so cut off the ids that do not fit
            overflow = len(tmp_docs) % shuffle_steps
            tmp_docs = tmp_docs[:len(tmp_docs) - overflow]
            x = np.reshape(tmp_docs, (-1, shuffle_steps))
            np.random.shuffle(x)
            tmp_docs = x.flatten().tolist()

    # split by percentage into train / validation id lists
    train_range = int((data_split[0] / 100) * len(tmp_docs))
    train_data = tmp_docs[:train_range]
    val_data = tmp_docs[train_range:]
    Logger.logger.info("Documents loaded (train|validation): {0} | {1}\n\n".format(
        len(train_data), len(val_data)))

    return train_data, val_data
# Example 7
    def setup_method(self):
        """
        Prepare logger, params and single-entry train/validation id lists
        before each test method runs.
        """
        Logger.init()
        Logger.remove_file_logger()

        self.params = Params()

        # fetch a shuffled subset of document ids from the database
        Config.add_config('./config.ini')
        collection_details = ("local_mongodb", "labels", "nuscenes_train")

        train_ids, val_ids = load_ids(
            collection_details,
            data_split=(70, 30),
            limit=100,
            shuffle_data=True,
        )
        self.train_data = [train_ids]
        self.val_data = [val_ids]
        self.collection_details = [collection_details]
# Example 8
 def __init__(self,
              col_details: List[Tuple[str, str, str]],
              doc_ids: List[List[any]],
              batch_size: int = 32,
              processors: List[any] = None,
              cache: ICache = None,
              shuffle_data: bool = True,
              data_group_size: int = 1,
              continues_data_selection: bool = True,
              fix_batch_size: bool = False):
     """
     :param col_details: MongoDB collection details with a tuple of 3 string entries
                         [client name (from config), database name, collection name]
     :param doc_ids: List of doc ids which are used to get the specific data from the MongoDB
     :param batch_size: number of batch size
     :param processors: List of Data processors; defaults to an empty list.
                        (BUG FIX: was a mutable default argument `list()`,
                        evaluated once and shared across every instance)
     :param cache: Passing instance of a cache e.g. RedisCache, if it is None, no caching is used.
                   Only possible if redis is locally available (has to be installed)
     :param shuffle_data: bool flag to determine if set should be shuffled after epoch is done
     :param data_group_size: number of steps that should be grouped e.g for time series. The data will still only
                             move forward one time step. E.g. for data_group_size=3:
                             [t-5, t-4, t-3], [t-4, t-3, t-2], [t-3, t-2, -1], etc.
                             data will not be shuffled in case data_group_size > 1
     :param fix_batch_size: if true batch size will always be the same, e.g. if batch_size=64 and there are only 63
                            datasamples left for the final batch, these 63 data points will be ignored. In case the
                            batch size of your model is fixed, set this to True.
     """
     if processors is None:
         processors = []
     super().__init__(batch_size, processors)
     self.doc_ids = doc_ids
     self.cache = cache
     self.shuffle_data = shuffle_data
     self.data_group_size = max(data_group_size, 1)
     self.continues_data_selection = continues_data_selection
     # each batch additionally needs the (data_group_size - 1) preceding docs
     self.docs_per_batch = self.batch_size + (self.data_group_size - 1)
     self.col_details = col_details
     self.fix_batch_size = fix_batch_size
     self.collections = None
     self.mongo_con = MongoDBConnect()
     self.mongo_con.add_connections_from_config(Config.get_config_parser())
     # in case a step_size is chosen > 1, make sure that len(doc_ids) is a
     # multiple of docs_per_batch, otherwise reshape will not be working
     # and throw errors
     if self.data_group_size > 1:
         for i in range(len(self.doc_ids)):
             overflow = len(self.doc_ids[i]) % self.docs_per_batch
             self.doc_ids[i] = self.doc_ids[i][:len(self.doc_ids[i]) -
                                               overflow]
# Example 9
        data = data_adapter.expand_1d(original_data)
        x, y_true, w = data_adapter.unpack_x_y_sample_weight(data)
        y_pred = keras_model(x, training=True)
        result = original_train_step(original_data)
        # custom stuff called during training
        show_pygame.show(x, y_true, y_pred)
        return result
    return call_custom_callbacks

if __name__ == "__main__":
    Logger.init()
    Logger.remove_file_logger()

    params = MultitaskParams(len(OD_CLASS_MAPPING.items()))

    Config.add_config('./config.ini')
    con = ("local_mongodb", "labels", "nuscenes_train")

    td, vd = load_ids(
        con,
        data_split=(90, 10),
        shuffle_data=True
    )

    train_data = [td]
    val_data = [vd]
    collection_details = [con]

    train_gen = MongoDBGenerator(
        collection_details,
        train_data,
# Example 10
#!/usr/bin/env python3

import argparse
from datetime import datetime

from common.logger import logger
from common.utils import Config, Instance
from common.decorators import human_time


# Read the tool configuration (instance ids live in the config file).
config = Config()

# CLI definition: create / delete / full snapshot operations.
parser = argparse.ArgumentParser(description='Snapshots-tools. To work, you must add instances id to the config file (space separated if multiple)')
parser.add_argument('-v', '--version', action='version',  version='yc-snapshoter 0.3.1')
parser.add_argument('-c', '--create', action='store_true', required=False, help='create snapshots for VMs')
parser.add_argument('-d', '--delete', action='store_true', required=False, help='delete all old snapshots for instances')
parser.add_argument('-f', '--full', action='store_true', required=False, help='create snapshots and delete old snapshots for instances')
args = parser.parse_args()


# Collect configured instance ids, dropping empty entries.
# BUG FIX: `instances` was previously only bound inside the try body, so a
# failure while reading the config raised a NameError at the check below;
# pre-binding it to an empty list keeps the empty-config warning path working.
instances = []
try:
    instances = [inst for inst in config.instances_list if inst != '']
except Exception as err:
    logger.error(err)
    print(err)


if not instances:
    msg = 'Instances ID is empty. Please type instance_id into config file. If you have multiple VMs, separate them with a space'
    logger.warning(msg)
    print(msg)
# Example 11
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
import os
from pathlib import Path
from typing import Dict

from django.contrib.admin import AdminSite

from common.utils import Config

# Project root: three directory levels above this settings module.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# All deployment-specific values are read from a YAML config file that sits
# next to the project root (one level above BASE_DIR).
CONFIG_FILE = os.path.join(BASE_DIR, "..", "config.yml")
config = Config(CONFIG_FILE)

# Branding for the Django admin site, with template-project fallbacks.
AdminSite.site_title = config.get("SITE_TITLE", "Django Template Project")
AdminSite.site_header = config.get("SITE_HEADER", "Django Template Project")
AdminSite.index_title = config.get("INDEX_TITLE", "Django Template Administration")
# SECRET_KEY is mandatory — raise_error makes startup fail fast if missing.
SECRET_KEY = config.get("SECRET_KEY", raise_error=True)
DEBUG = config.get("DEBUG", False, cast=bool)
ALLOWED_HOSTS = config.get("ALLOWED_HOSTS", cast=list)

INSTALLED_APPS = [
    "administration",
    "common",
    "rest_framework",
    "drf_spectacular",
    "corsheaders",
    "django.contrib.contenttypes",
# Example 12
import os
from pathlib import Path

from common.utils import Config

# Directory containing this file.
BASE_DIR = Path(__file__).resolve(strict=True).parent

# Settings come from a YAML config file one level above this package.
CONFIG_FILE = os.path.join(BASE_DIR, "..", "config.yml")
config = Config(CONFIG_FILE)

# Fail fast at import time if the mandatory database settings are missing.
config.get("DB_NAME", raise_error=True)
config.get("DB_PASSWORD", raise_error=True)