Example #1
def cross_validate_paf_graphs(
    config,
    inference_config,
    full_data_file,
    metadata_file,
    output_name="",
    pcutoff=0.1,
    greedy=False,
    add_discarded=True,
    calibrate=False,
    overwrite_config=True,
):
    cfg = auxiliaryfunctions.read_config(config)
    inf_cfg = auxiliaryfunctions.read_plainconfig(inference_config)
    inf_cfg_temp = inf_cfg.copy()
    inf_cfg_temp["pcutoff"] = pcutoff

    with open(full_data_file, "rb") as file:
        data = pickle.load(file)
    with open(metadata_file, "rb") as file:
        metadata = pickle.load(file)

    params = _set_up_evaluation(data)
    to_ignore = _filter_unwanted_paf_connections(config, params["paf_graph"])
    paf_inds, paf_scores = _get_n_best_paf_graphs(
        data, metadata, params["paf_graph"], ignore_inds=to_ignore
    )

    if calibrate:
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(cfg)
        calibration_file = os.path.join(
            cfg["project_path"],
            str(trainingsetfolder),
            "CollectedData_" + cfg["scorer"] + ".h5",
        )
    else:
        calibration_file = ""

    results = _benchmark_paf_graphs(
        cfg,
        inf_cfg_temp,
        data,
        paf_inds,
        greedy,
        add_discarded,
        calibration_file=calibration_file,
    )
    # Select optimal PAF graph
    df = results[1]
    size_opt = np.argmax((1 - df.loc["miss", "mean"]) * df.loc["purity", "mean"])
    pose_config = inference_config.replace("inference_cfg", "pose_cfg")
    if not overwrite_config:
        shutil.copy(pose_config, pose_config.replace(".yaml", "_old.yaml"))
    inds = list(paf_inds[size_opt])
    auxiliaryfunctions.edit_config(
        pose_config, {"paf_best": [int(ind) for ind in inds]}
    )
    if output_name:
        with open(output_name, "wb") as file:
            pickle.dump([results], file)
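
A minimal usage sketch; all file paths below are hypothetical placeholders for a real project layout:

# Hypothetical paths; the data/metadata pickles come from a prior
# evaluation of the multi-animal network.
cross_validate_paf_graphs(
    "/analysis/project/reaching-task/config.yaml",
    "/analysis/project/reaching-task/test/inference_cfg.yaml",
    "/analysis/project/reaching-task/evaluation/full_data.pickle",
    "/analysis/project/reaching-task/evaluation/metadata.pickle",
    output_name="paf_crossval_results.pickle",
)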
Example #2
 def edit_pose_config(self, event):
     """
     """
     self.shuffles.Enable(True)
     self.trainingindex.Enable(True)
     self.display_iters.Enable(True)
     self.save_iters.Enable(True)
     self.max_iters.Enable(True)
     self.snapshots.Enable(True)
     # Read the project config file to locate the model folder
     cfg = auxiliaryfunctions.read_config(self.config)
     trainFraction = cfg["TrainingFraction"][self.trainingindex.GetValue()]
     self.pose_cfg_path = os.path.join(
         cfg["project_path"],
         auxiliaryfunctions.GetModelFolder(trainFraction,
                                           self.shuffles.GetValue(), cfg),
         "train",
         "pose_cfg.yaml",
     )
     # Let the user open the file with the default text editor (mac compatible).
     if sys.platform == "darwin":
         # subprocess.call returns 0 on success, so set the flag explicitly
         self.file_open_bool = subprocess.call(["open", self.pose_cfg_path])
         self.file_open_bool = True
     else:
         self.file_open_bool = webbrowser.open(self.pose_cfg_path)
     if self.file_open_bool:
         self.pose_cfg = auxiliaryfunctions.read_plainconfig(
             self.pose_cfg_path)
     else:
         raise FileNotFoundError("File not found!")
Example #3
def Downloadweights(modeltype, model_path):
    """
    Downloads the ImageNet pretrained weights for ResNets, MobileNets et al. from TensorFlow...
    """
    import urllib.request
    import tarfile
    from io import BytesIO

    target_dir = model_path.parents[0]
    neturls = auxiliaryfunctions.read_plainconfig(
        target_dir / "pretrained_model_urls.yaml"
    )
    try:
        if "efficientnet" in modeltype:
            url = neturls["efficientnet"]
            url = url + modeltype.replace("_", "-") + ".tar.gz"
        else:
            url = neturls[modeltype]
        print("Downloading an ImageNet-pretrained model from {}....".format(url))
        response = urllib.request.urlopen(url)
        with tarfile.open(fileobj=BytesIO(response.read()), mode="r:gz") as tar:
            tar.extractall(path=target_dir)
    except KeyError:
        print("Model does not exist: ", modeltype)
        print("Pick one of the following: ", neturls.keys())
Example #4
def read_inferencecfg(path_inference_config, cfg):
    """Load inferencecfg or initialize it."""
    try:
        inferencecfg = auxiliaryfunctions.read_plainconfig(
            str(path_inference_config))
    except FileNotFoundError:
        inferencecfg = form_default_inferencecfg(cfg)
        auxiliaryfunctions.write_plainconfig(str(path_inference_config),
                                             dict(inferencecfg))
    return inferencecfg
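
A usage sketch of the load-or-initialize behavior; the paths are hypothetical, and topktoretain is one of the keys set by form_default_inferencecfg below:

cfg = auxiliaryfunctions.read_config("/analysis/project/reaching-task/config.yaml")
inferencecfg = read_inferencecfg(
    "/analysis/project/reaching-task/test/inference_cfg.yaml", cfg
)
print(inferencecfg["topktoretain"])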
Example #5
def form_default_inferencecfg(cfg):
    # load defaults
    inferencecfg = auxiliaryfunctions.read_plainconfig(
        os.path.join(auxiliaryfunctions.get_deeplabcut_path(),
                     "inference_cfg.yaml"))
    # set project specific parameters:
    inferencecfg["minimalnumberofconnections"] = (
        len(cfg["multianimalbodyparts"]) / 2)  # reasonable default
    inferencecfg["topktoretain"] = len(cfg["individuals"]) + 1 * (
        len(cfg["uniquebodyparts"]) > 0)  # reasonable default
    return inferencecfg
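
To make the arithmetic concrete, a small worked example with hypothetical config values:

# 4 multi-animal bodyparts, 3 individuals, 1 unique bodypart:
#   minimalnumberofconnections = 4 / 2 = 2.0
#   topktoretain = 3 + 1 * (1 > 0) = 4
cfg = {
    "multianimalbodyparts": ["snout", "leftear", "rightear", "tailbase"],
    "uniquebodyparts": ["corner"],
    "individuals": ["mouse1", "mouse2", "mouse3"],
}
inferencecfg = form_default_inferencecfg(cfg)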
Example #6
def DownloadModel(modelname, target_dir):
    """
    Downloads a DeepLabCut Model Zoo Project
    """
    import urllib.request
    import tarfile
    from tqdm import tqdm

    def show_progress(count, block_size, total_size):
        # urlretrieve reporthook: advance the bar by one downloaded block
        pbar.update(block_size)

    def tarfilenamecutting(tarf):
        """Auxiliary function that strips the leading folder,
        e.g. /xyz-trainsetxyshufflez/, from archive member paths.
        """
        for memberid, member in enumerate(tarf.getmembers()):
            if memberid == 0:
                parent = str(member.path)
                prefix_len = len(parent) + 1
            if member.path.startswith(parent):
                member.path = member.path[prefix_len:]
                yield member

    dlc_path = auxiliaryfunctions.get_deeplabcut_path()
    neturls = auxiliaryfunctions.read_plainconfig(
        os.path.join(
            dlc_path,
            "pose_estimation_tensorflow",
            "models",
            "pretrained",
            "pretrained_model_urls.yaml",
        )
    )
    if modelname in neturls.keys():
        url = neturls[modelname]
        response = urllib.request.urlopen(url)
        print(
            "Downloading the model from the DeepLabCut server @Harvard -> Go Crimson!!! {}....".format(
                url
            )
        )
        total_size = int(response.getheader("Content-Length"))
        pbar = tqdm(unit="B", total=total_size, position=0)
        filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress)
        with tarfile.open(filename, mode="r:gz") as tar:
            tar.extractall(target_dir, members=tarfilenamecutting(tar))
    else:
        models = [
            fn
            for fn in neturls.keys()
            if "resnet_" not in fn and "mobilenet_" not in fn
        ]
        print("Model does not exist: ", modelname)
        print("Pick one of the following: ", models)
Example #7
 def update_params(self, event):
     # update the variables with the edited values in the pose config file
     if self.file_open_bool:
         self.pose_cfg = auxiliaryfunctions.read_plainconfig(self.pose_cfg_path)
         display_iters = str(self.pose_cfg["display_iters"])
         save_iters = str(self.pose_cfg["save_iters"])
         max_iters = str(self.pose_cfg["multi_step"][-1][-1])
         self.display_iters.SetValue(display_iters)
         self.save_iters.SetValue(save_iters)
         self.max_iters.SetValue(max_iters)
         self.shuffles.Enable(True)
         #self.trainingindex.Enable(True)
         self.display_iters.Enable(True)
         self.save_iters.Enable(True)
         self.max_iters.Enable(True)
         self.snapshots.Enable(True)
     else:
         raise FileNotFoundError("File not found!")
Example #8
def DownloadModel(modelname, target_dir):
    """
    Downloads a DeepLabCut Model Zoo Project
    """
    import urllib.request
    import tarfile
    from tqdm import tqdm
    from tqdm import tqdm

    def show_progress(count, block_size, total_size):
        pbar.update(block_size)

    def tarfilenamecutting(tarf):
        '''Auxiliary function that strips the leading folder,
        e.g. /xyz-trainsetxyshufflez/, from archive member paths.
        '''
        for memberid, member in enumerate(tarf.getmembers()):
            if memberid == 0:
                parent = str(member.path)
                prefix_len = len(parent) + 1
            if member.path.startswith(parent):
                member.path = member.path[prefix_len:]
                yield member

    # TODO: update how the DLC path is obtained
    import deeplabcut
    neturls = auxiliaryfunctions.read_plainconfig(
        os.path.join(
            os.path.dirname(deeplabcut.__file__),
            'pose_estimation_tensorflow/models/pretrained/pretrained_model_urls.yaml',
        )
    )
    if modelname in neturls.keys():
        url = neturls[modelname]
        response = urllib.request.urlopen(url)
        print("Downloading the model from the DeepLabCut server @Harvard -> Go Crimson!!! {}....".format(url))
        total_size = int(response.getheader('Content-Length'))
        pbar = tqdm(unit='B', total=total_size, position=0)
        filename, _ = urllib.request.urlretrieve(url, reporthook=show_progress)
        with tarfile.open(filename, mode='r:gz') as tar:
            tar.extractall(target_dir, members=tarfilenamecutting(tar))
    else:
        models = [fn for fn in neturls.keys()
                  if 'resnet_' not in fn and 'mobilenet_' not in fn]
        print("Model does not exist: ", modelname)
        print("Pick one of the following: ", models)
Example #9
def Downloadweights(modeltype, model_path):
    """
    Downloads the ImageNet pretrained weights for ResNet.
    """

    import urllib.request
    import tarfile
    from io import BytesIO

    target_dir = model_path.parents[0]
    neturls = auxiliaryfunctions.read_plainconfig(target_dir /
                                                  'pretrained_model_urls.yaml')
    try:
        url = neturls[modeltype]
        print(
            "Downloading an ImageNet-pretrained model from {}....".format(url))
        response = urllib.request.urlopen(url)
        with tarfile.open(fileobj=BytesIO(response.read()),
                          mode='r:gz') as tar:
            tar.extractall(path=target_dir)
    except KeyError:
        print("Model does not exist", modeltype)
Example #10
def train_network(
    config,
    shuffle=1,
    trainingsetindex=0,
    max_snapshots_to_keep=5,
    displayiters=None,
    saveiters=None,
    maxiters=None,
    allow_growth=False,
    gputouse=None,
    autotune=False,
    keepdeconvweights=True,
    modelprefix="",
):
    """Trains the network with the labels in the training dataset.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    shuffle: int, optional
        Integer value specifying the shuffle index to select for training. Default is set to 1

    trainingsetindex: int, optional
        Integer specifying which TrainingsetFraction to use. By default the first (note that TrainingFraction is a list in config.yaml).

    Additional parameters:

    max_snapshots_to_keep: int or None. Sets how many snapshots are kept, i.e. states of the trained network. A snapshot is stored
        every saveiters iterations, but only the last max_snapshots_to_keep are kept! If you change this to None, then all are kept.
        See: https://github.com/AlexEMG/DeepLabCut/issues/8#issuecomment-387404835

    displayiters: this variable is actually set in pose_config.yaml. However, you can overwrite it with this hack. Don't use this regularly, just if you are too lazy to dig out
        the pose_config.yaml file for the corresponding project. If None, the value from there is used, otherwise it is overwritten! Default: None

    saveiters: this variable is actually set in pose_config.yaml. However, you can overwrite it with this hack. Don't use this regularly, just if you are too lazy to dig out
        the pose_config.yaml file for the corresponding project. If None, the value from there is used, otherwise it is overwritten! Default: None

    maxiters: this variable is actually set in pose_config.yaml. However, you can overwrite it with this hack. Don't use this regularly, just if you are too lazy to dig out
        the pose_config.yaml file for the corresponding project. If None, the value from there is used, otherwise it is overwritten! Default: None

    allow_growth: bool, default false.
        Memory issues can occur on smaller GPUs. If true, the memory allocator does not pre-allocate the entire specified
        GPU memory region, instead starting small and growing as needed. See issue: https://forum.image.sc/t/how-to-stop-running-out-of-vram/30551/2

    gputouse: int, optional. Index of your GPU (see number in nvidia-smi). If you do not have a GPU, keep it as None.
        See: https://nvidia.custhelp.com/app/answers/detail/a_id/3751/~/useful-nvidia-smi-queries

    autotune: property of TensorFlow, somehow faster if 'false' (as Eldar found out, see https://github.com/tensorflow/tensorflow/issues/13317). Default: False

    keepdeconvweights: bool, default: true
        Also restores the weights of the deconvolution layers (and the backbone) when training from a snapshot. Note that if you change the number of bodyparts, you need to
        set this to false for re-training.

    Example
    --------
    For training the network for the first shuffle of the training dataset:
    >>> deeplabcut.train_network('/analysis/project/reaching-task/config.yaml')
    --------

    For training the network for the second shuffle of the training dataset:
    >>> deeplabcut.train_network('/analysis/project/reaching-task/config.yaml',shuffle=2,keepdeconvweights=True)
    --------

    """
    import tensorflow as tf

    # reload logger.
    import importlib
    import logging

    importlib.reload(logging)
    logging.shutdown()

    from deeplabcut.utils import auxiliaryfunctions

    tf.compat.v1.reset_default_graph()
    start_path = os.getcwd()

    # Read file path for pose_config file. >> pass it on
    cfg = auxiliaryfunctions.read_config(config)
    modelfoldername = auxiliaryfunctions.GetModelFolder(
        cfg["TrainingFraction"][trainingsetindex],
        shuffle,
        cfg,
        modelprefix=modelprefix)
    poseconfigfile = Path(
        os.path.join(cfg["project_path"], str(modelfoldername), "train",
                     "pose_cfg.yaml"))
    if not poseconfigfile.is_file():
        print("The training datafile ", poseconfigfile, " is not present.")
        print(
            "Probably, the training dataset for this specific shuffle index was not created."
        )
        print(
            "Try with a different shuffle/trainingsetfraction or use function 'create_training_dataset' to create a new trainingdataset with this shuffle index."
        )
        return

    # Set environment variables
    if autotune is not False:  # see: https://github.com/tensorflow/tensorflow/issues/13317
        os.environ["TF_CUDNN_USE_AUTOTUNE"] = "0"
    if gputouse is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = str(gputouse)
    try:
        cfg_dlc = auxiliaryfunctions.read_plainconfig(poseconfigfile)
        if "multi-animal" in cfg_dlc["dataset_type"]:
            from deeplabcut.pose_estimation_tensorflow.core.train_multianimal import train

            print("Selecting multi-animal trainer")
            train(
                str(poseconfigfile),
                displayiters,
                saveiters,
                maxiters,
                max_to_keep=max_snapshots_to_keep,
                keepdeconvweights=keepdeconvweights,
                allow_growth=allow_growth,
            )  # pass on path and file name for pose_cfg.yaml!
        else:
            from deeplabcut.pose_estimation_tensorflow.core.train import train

            print("Selecting single-animal trainer")
            train(
                str(poseconfigfile),
                displayiters,
                saveiters,
                maxiters,
                max_to_keep=max_snapshots_to_keep,
                keepdeconvweights=keepdeconvweights,
                allow_growth=allow_growth,
            )  # pass on path and file name for pose_cfg.yaml!

    except BaseException as e:
        raise e
    finally:
        os.chdir(str(start_path))
    print(
        "The network is now trained and ready to evaluate. Use the function 'evaluate_network' to evaluate the network."
    )
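
Beyond the docstring examples, a sketch that overrides the pose_cfg.yaml iteration settings (the path is hypothetical):

import deeplabcut

deeplabcut.train_network(
    "/analysis/project/reaching-task/config.yaml",
    shuffle=1,
    displayiters=100,   # overrides the value in pose_cfg.yaml
    saveiters=5000,     # overrides the value in pose_cfg.yaml
    maxiters=30000,     # overrides the value in pose_cfg.yaml
    allow_growth=True,  # do not pre-allocate the whole GPU memory
)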
Example #11
def bayesian_search(
    config_path,
    inferencecfg,
    pbounds,
    edgewisecondition=True,
    shuffle=1,
    trainingsetindex=0,
    modelprefix="",
    snapshotindex=-1,
    target="rpck_test",
    maximize=True,
    init_points=20,
    n_iter=50,
    acq="ei",
    log_file=None,
    dcorr=5,
    leastbpts=3,
    printingintermediatevalues=True,
):

    if "rpck" in target:
        assert maximize  # rpck is maximized

    if "rmse" in target:
        assert not maximize  # rmse is minimized

    cfg = auxiliaryfunctions.read_config(config_path)
    evaluationfolder = os.path.join(
        cfg["project_path"],
        str(
            auxiliaryfunctions.GetEvaluationFolder(
                cfg["TrainingFraction"][int(trainingsetindex)],
                shuffle,
                cfg,
                modelprefix=modelprefix,
            )),
    )

    DLCscorer, DLCscorerlegacy = auxiliaryfunctions.GetScorerName(
        cfg,
        shuffle,
        cfg["TrainingFraction"][int(trainingsetindex)],
        cfg["iteration"],
        modelprefix=modelprefix,
    )

    # load params
    fns = return_evaluate_network_data(
        config_path,
        shuffle=shuffle,
        trainingsetindex=trainingsetindex,
        modelprefix=modelprefix,
    )
    predictionsfn = fns[snapshotindex]
    data, metadata = auxfun_multianimal.LoadFullMultiAnimalData(predictionsfn)
    params = set_up_evaluation(data)
    columns = ["train_iter", "train_frac", "shuffle"]
    columns += [
        "_".join((b, a)) for a in ("train", "test")
        for b in ("rmse", "hits", "misses", "falsepos", "ndetects", "pck",
                  "rpck")
    ]

    train_iter = trainingsetindex  # int(predictionsfn.split('-')[-1].split('.')[0])
    train_frac = cfg["TrainingFraction"][
        train_iter]  # int(predictionsfn.split('trainset')[1].split('shuffle')[0])
    trainIndices = metadata["data"]["trainIndices"]
    testIndices = metadata["data"]["testIndices"]

    if edgewisecondition:
        mf = str(
            auxiliaryfunctions.GetModelFolder(
                cfg["TrainingFraction"][int(trainingsetindex)],
                shuffle,
                cfg,
                modelprefix=modelprefix,
            ))
        modelfolder = os.path.join(cfg["project_path"], mf)
        path_inferencebounds_config = (Path(modelfolder) / "test" /
                                       "inferencebounds.yaml")
        try:
            inferenceboundscfg = auxiliaryfunctions.read_plainconfig(
                path_inferencebounds_config)
        except FileNotFoundError:
            print("Computing distances...")
            from deeplabcut.pose_estimation_tensorflow import calculatepafdistancebounds

            inferenceboundscfg = calculatepafdistancebounds(
                config_path, shuffle, trainingsetindex)
            auxiliaryfunctions.write_plainconfig(path_inferencebounds_config,
                                                 inferenceboundscfg)

        partaffinityfield_graph = params["paf_graph"]
        upperbound = np.array([
            float(inferenceboundscfg[str(edge[0]) + "_" +
                                     str(edge[1])]["intra_max"])
            for edge in partaffinityfield_graph
        ])
        lowerbound = np.array([
            float(inferenceboundscfg[str(edge[0]) + "_" +
                                     str(edge[1])]["intra_min"])
            for edge in partaffinityfield_graph
        ])

        upperbound *= inferencecfg["upperbound_factor"]
        lowerbound *= inferencecfg["lowerbound_factor"]

    else:
        lowerbound = None
        upperbound = None

    def dlc_hyperparams(**kwargs):
        inferencecfg.update(kwargs)
        # Ensure type consistency
        for k, (bound, _) in pbounds.items():
            inferencecfg[k] = type(bound)(inferencecfg[k])

        stats = compute_crossval_metrics_preloadeddata(
            params,
            columns,
            inferencecfg,
            data,
            trainIndices,
            testIndices,
            train_iter,
            train_frac,
            shuffle,
            lowerbound,
            upperbound,
            dcorr=dcorr,
            leastbpts=leastbpts,
        )

        # stats = compute_crossval_metrics(config_path, inferencecfg, shuffle,trainingsetindex,
        #                                    dcorr=dcorr,leastbpts=leastbpts,modelprefix=modelprefix)

        if printingintermediatevalues:
            print(
                "rpck",
                stats["rpck_test"].values[0],
                "rpck train:",
                stats["rpck_train"].values[0],
            )
            print(
                "rmse",
                stats["rmse_test"].values[0],
                "miss",
                stats["misses_test"].values[0],
                "hit",
                stats["hits_test"].values[0],
            )

        # val = stats['rmse_test'].values[0]*(1+stats['misses_test'].values[0]*1./stats['hits_test'].values[0])
        val = stats[target].values[0]
        if np.isnan(val):
            if maximize:  # pck case
                val = -1e9  # large negative penalty
            else:  # RMSE case, return a large RMSE
                val = 1e9

        if not maximize:
            val = -val

        return val

    opt = BayesianOptimization(f=dlc_hyperparams,
                               pbounds=pbounds,
                               random_state=42)
    if log_file:
        load_logs(opt, log_file)
    logger = JSONLogger(path=os.path.join(evaluationfolder, "opti_log" +
                                          DLCscorer + ".json"))
    opt.subscribe(Events.OPTIMIZATION_STEP, logger)
    opt.maximize(init_points=init_points, n_iter=n_iter, acq=acq)

    inferencecfg.update(opt.max["params"])
    for k, (bound, _) in pbounds.items():
        tmp = type(bound)(inferencecfg[k])
        if isinstance(tmp, np.floating):
            tmp = np.round(tmp, 2).item()
        inferencecfg[k] = tmp

    return inferencecfg, opt
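
A hedged sketch of a search space; the key names are illustrative inference_cfg parameters, and each value is a (lower, upper) tuple whose first element's type fixes the parameter's type (see the coercion loop in dlc_hyperparams):

# inferencecfg: an existing inference configuration dict,
# e.g. from read_inferencecfg in Example #4.
# Hypothetical bounds; an integer lower bound yields an integer parameter.
pbounds = {
    "pafthreshold": (0.05, 0.7),
    "minimalnumberofconnections": (1, 6),
}
inferencecfg, opt = bayesian_search(
    "/analysis/project/reaching-task/config.yaml",
    inferencecfg,
    pbounds,
    target="rmse_test",
    maximize=False,  # RMSE is minimized, as asserted at the top
)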
Example #12
    def __init__(self, parent, gui_size, cfg):
        """Constructor"""
        wx.Panel.__init__(self, parent=parent)
        # variable initialization
        self.method = "automatic"
        self.config = cfg
        # design the panel
        self.sizer = wx.GridBagSizer(5, 5)

        text = wx.StaticText(self, label="DeepLabCut - Step 5. Train network")
        self.sizer.Add(text,
                       pos=(0, 0),
                       flag=wx.TOP | wx.LEFT | wx.BOTTOM,
                       border=15)
        # Add logo of DLC
        icon = wx.StaticBitmap(self, bitmap=wx.Bitmap(LOGO_PATH))
        self.sizer.Add(icon,
                       pos=(0, 4),
                       flag=wx.TOP | wx.RIGHT | wx.ALIGN_RIGHT,
                       border=5)

        line1 = wx.StaticLine(self)
        self.sizer.Add(line1,
                       pos=(1, 0),
                       span=(1, 5),
                       flag=wx.EXPAND | wx.BOTTOM,
                       border=10)

        self.cfg_text = wx.StaticText(self, label="Select the config file")
        self.sizer.Add(self.cfg_text,
                       pos=(2, 0),
                       flag=wx.TOP | wx.LEFT,
                       border=5)

        if sys.platform == "darwin":
            self.sel_config = wx.FilePickerCtrl(
                self,
                path="",
                style=wx.FLP_USE_TEXTCTRL,
                message="Choose the config.yaml file",
                wildcard="*.yaml",
            )
        else:
            self.sel_config = wx.FilePickerCtrl(
                self,
                path="",
                style=wx.FLP_USE_TEXTCTRL,
                message="Choose the config.yaml file",
                wildcard="config.yaml",
            )
        # self.sel_config = wx.FilePickerCtrl(self, path="",style=wx.FLP_USE_TEXTCTRL,message="Choose the config.yaml file", wildcard="config.yaml")
        self.sizer.Add(self.sel_config,
                       pos=(2, 1),
                       span=(1, 3),
                       flag=wx.TOP | wx.EXPAND,
                       border=5)
        self.sel_config.SetPath(self.config)
        self.sel_config.Bind(wx.EVT_FILEPICKER_CHANGED, self.select_config)

        vbox1 = wx.BoxSizer(wx.VERTICAL)
        self.pose_cfg_text = wx.Button(
            self, label="Click to open the pose config file")
        self.pose_cfg_text.Bind(wx.EVT_BUTTON, self.edit_pose_config)
        vbox1.Add(self.pose_cfg_text, 10, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)

        self.update_params_text = wx.Button(self,
                                            label="Update the parameters")
        self.update_params_text.Bind(wx.EVT_BUTTON, self.update_params)
        vbox1.Add(self.update_params_text, 10, wx.EXPAND | wx.TOP | wx.BOTTOM,
                  5)

        self.pose_cfg_text.Hide()
        self.update_params_text.Hide()

        sb = wx.StaticBox(self, label="Optional Attributes")
        boxsizer = wx.StaticBoxSizer(sb, wx.VERTICAL)

        hbox1 = wx.BoxSizer(wx.HORIZONTAL)
        hbox2 = wx.BoxSizer(wx.HORIZONTAL)

        shuffles_text = wx.StaticBox(self, label="Specify the shuffle")
        shuffles_text_boxsizer = wx.StaticBoxSizer(shuffles_text, wx.VERTICAL)
        self.shuffles = wx.SpinCtrl(self, value="1", min=0, max=100)
        shuffles_text_boxsizer.Add(self.shuffles, 1,
                                   wx.EXPAND | wx.TOP | wx.BOTTOM, 10)

        trainingindex = wx.StaticBox(self,
                                     label="Specify the trainingset index")
        trainingindex_boxsizer = wx.StaticBoxSizer(trainingindex, wx.VERTICAL)
        self.trainingindex = wx.SpinCtrl(self, value="0", min=0, max=100)
        trainingindex_boxsizer.Add(self.trainingindex, 1,
                                   wx.EXPAND | wx.TOP | wx.BOTTOM, 10)

        self.pose_cfg_choice = wx.RadioBox(
            self,
            label="Want to edit pose_cfg.yaml file?",
            choices=["Yes", "No"],
            majorDimension=1,
            style=wx.RA_SPECIFY_COLS,
        )
        self.pose_cfg_choice.Bind(wx.EVT_RADIOBOX, self.chooseOption)
        self.pose_cfg_choice.SetSelection(1)

        # use the default pose_cfg file for default values
        default_pose_cfg_path = os.path.join(
            Path(deeplabcut.__file__).parent, "pose_cfg.yaml")
        pose_cfg = auxiliaryfunctions.read_plainconfig(default_pose_cfg_path)
        display_iters = str(pose_cfg["display_iters"])
        save_iters = str(pose_cfg["save_iters"])
        max_iters = str(pose_cfg["multi_step"][-1][-1])

        display_iters_text = wx.StaticBox(self, label="Display iterations")
        display_iters_text_boxsizer = wx.StaticBoxSizer(
            display_iters_text, wx.VERTICAL)
        self.display_iters = wx.SpinCtrl(self,
                                         value=display_iters,
                                         min=1,
                                         max=int(max_iters))
        display_iters_text_boxsizer.Add(self.display_iters, 1,
                                        wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        # self.display_iters.Enable(False)

        save_iters_text = wx.StaticBox(self, label="Save iterations")
        save_iters_text_boxsizer = wx.StaticBoxSizer(save_iters_text,
                                                     wx.VERTICAL)
        self.save_iters = wx.SpinCtrl(self,
                                      value=save_iters,
                                      min=1,
                                      max=int(max_iters))
        save_iters_text_boxsizer.Add(self.save_iters, 1,
                                     wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        # self.save_iters.Enable(False)

        max_iters_text = wx.StaticBox(self, label="Maximum iterations")
        max_iters_text_boxsizer = wx.StaticBoxSizer(max_iters_text,
                                                    wx.VERTICAL)
        self.max_iters = wx.SpinCtrl(self,
                                     value=max_iters,
                                     min=1,
                                     max=int(max_iters))
        max_iters_text_boxsizer.Add(self.max_iters, 1,
                                    wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        # self.max_iters.Enable(False)

        snapshots = wx.StaticBox(self, label="Number of snapshots to keep")
        snapshots_boxsizer = wx.StaticBoxSizer(snapshots, wx.VERTICAL)
        self.snapshots = wx.SpinCtrl(self, value="5", min=1, max=100)
        snapshots_boxsizer.Add(self.snapshots, 1,
                               wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        # self.snapshots.Enable(False)

        hbox1.Add(shuffles_text_boxsizer, 10, wx.EXPAND | wx.TOP | wx.BOTTOM,
                  5)
        hbox1.Add(trainingindex_boxsizer, 10, wx.EXPAND | wx.TOP | wx.BOTTOM,
                  5)
        hbox1.Add(self.pose_cfg_choice, 10, wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        hbox1.Add(vbox1, 10, wx.EXPAND | wx.TOP | wx.BOTTOM, 10)

        hbox2.Add(display_iters_text_boxsizer, 10,
                  wx.EXPAND | wx.TOP | wx.BOTTOM, 5)

        hbox2.Add(save_iters_text_boxsizer, 10, wx.EXPAND | wx.TOP | wx.BOTTOM,
                  5)
        hbox2.Add(max_iters_text_boxsizer, 10, wx.EXPAND | wx.TOP | wx.BOTTOM,
                  5)
        hbox2.Add(snapshots_boxsizer, 10, wx.EXPAND | wx.TOP | wx.BOTTOM, 5)

        boxsizer.Add(hbox1, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 10)
        boxsizer.Add(hbox2, 0, wx.EXPAND | wx.TOP | wx.BOTTOM, 10)

        self.sizer.Add(
            boxsizer,
            pos=(4, 0),
            span=(1, 5),
            flag=wx.EXPAND | wx.TOP | wx.LEFT | wx.RIGHT,
            border=10,
        )

        self.help_button = wx.Button(self, label="Help")
        self.sizer.Add(self.help_button, pos=(5, 0), flag=wx.LEFT, border=10)
        self.help_button.Bind(wx.EVT_BUTTON, self.help_function)

        self.ok = wx.Button(self, label="Ok")
        self.sizer.Add(self.ok, pos=(5, 4))
        self.ok.Bind(wx.EVT_BUTTON, self.train_network)

        self.cancel = wx.Button(self, label="Reset")
        self.sizer.Add(self.cancel,
                       pos=(5, 1),
                       span=(1, 1),
                       flag=wx.BOTTOM | wx.RIGHT,
                       border=10)
        self.cancel.Bind(wx.EVT_BUTTON, self.cancel_train_network)

        self.sizer.AddGrowableCol(2)

        self.SetSizer(self.sizer)
        self.sizer.Fit(self)
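
For context, a minimal sketch of hosting such a panel; TrainNetwork stands in for the panel class, whose actual name the snippet does not show:

import wx

app = wx.App()
frame = wx.Frame(None, size=(800, 600))
# TrainNetwork is a hypothetical name for the panel class above.
panel = TrainNetwork(frame, gui_size=(800, 600), cfg="/path/to/config.yaml")
frame.Show()
app.MainLoop()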
Example #13
def create_training_dataset(
    config,
    num_shuffles=1,
    Shuffles=None,
    windows2linux=False,
    userfeedback=False,
    trainIndices=None,
    testIndices=None,
    net_type=None,
    augmenter_type=None,
    posecfg_template=None,
):
    """
    Creates a training dataset. Labels from all the extracted frames are merged into a single .h5 file.
    Only the videos included in the config file are used to create this dataset.

    [OPTIONAL] Use the function 'add_new_video' at any stage of the project to add more videos to the project.

    Parameters
    ----------
    config : string
        Full path of the config.yaml file as a string.

    num_shuffles : int, optional
        Number of shuffles of training dataset to create, i.e. [1,2,3] for num_shuffles=3. Default is set to 1.

    Shuffles: list of integers, optional
        Alternatively to num_shuffles, the user can give an explicit list of shuffle indices (integers!).

    userfeedback: bool, optional
        If this is set to False, then all requested train/test splits are created (no matter if they already exist). If you
        want to ensure that previous splits are not overwritten, set this to True and you will be asked about each split.

    trainIndices: list of lists, optional (default=None)
        List of one or multiple lists containing train indexes.
        A list containing two lists of training indexes will produce two splits.

    testIndices: list of lists, optional (default=None)
        List of one or multiple lists containing test indexes.

    net_type: list
        Type of networks. Currently resnet_50, resnet_101, resnet_152, mobilenet_v2_1.0, mobilenet_v2_0.75,
        mobilenet_v2_0.5, mobilenet_v2_0.35, efficientnet-b0, efficientnet-b1, efficientnet-b2, efficientnet-b3,
        efficientnet-b4, efficientnet-b5, and efficientnet-b6 are supported.

    augmenter_type: string
        Type of augmenter. Currently default, imgaug, tensorpack, and deterministic are supported.

    posecfg_template: string (optional, default=None)
        Path to a pose_cfg.yaml file to use as a template for generating the new one for the current iteration. Useful if you
        would like to start with the same parameters a previous training iteration. None uses the default pose_cfg.yaml.

    Example
    --------
    >>> deeplabcut.create_training_dataset('/analysis/project/reaching-task/config.yaml',num_shuffles=1)
    Windows:
    >>> deeplabcut.create_training_dataset('C:\\Users\\Ulf\\looming-task\\config.yaml',Shuffles=[3,17,5])
    --------
    """
    import scipy.io as sio

    if windows2linux:
        # DeprecationWarnings are silenced since Python 3.2 unless triggered in __main__
        warnings.warn(
            "`windows2linux` has no effect since 2.2.0.4 and will be removed in 2.2.1.",
            FutureWarning,
        )

    # Loading metadata from config file:
    cfg = auxiliaryfunctions.read_config(config)
    if posecfg_template:
        if not posecfg_template.endswith("pose_cfg.yaml"):
            raise ValueError(
                "posecfg_template argument must contain path to a pose_cfg.yaml file"
            )
        else:
            print("Reloading pose_cfg parameters from " + posecfg_template +
                  '\n')
            from deeplabcut.utils.auxiliaryfunctions import read_plainconfig

            prior_cfg = read_plainconfig(posecfg_template)
    if cfg.get("multianimalproject", False):
        from deeplabcut.generate_training_dataset.multiple_individuals_trainingsetmanipulation import (
            create_multianimaltraining_dataset, )

        create_multianimaltraining_dataset(config,
                                           num_shuffles,
                                           Shuffles,
                                           net_type=net_type)
    else:
        scorer = cfg["scorer"]
        project_path = cfg["project_path"]
        # Create path for training sets & store data there
        trainingsetfolder = auxiliaryfunctions.GetTrainingSetFolder(
            cfg)  # Path concatenation OS platform independent
        auxiliaryfunctions.attempttomakefolder(Path(
            os.path.join(project_path, str(trainingsetfolder))),
                                               recursive=True)

        Data = merge_annotateddatasets(
            cfg,
            Path(os.path.join(project_path, trainingsetfolder)),
        )
        if Data is None:
            return
        Data = Data[scorer]  # extract labeled data

        # loading & linking pretrained models
        if net_type is None:  # loading & linking pretrained models
            net_type = cfg.get("default_net_type", "resnet_50")
        else:
            if ("resnet" in net_type or "mobilenet" in net_type
                    or "efficientnet" in net_type):
                pass
            else:
                raise ValueError("Invalid network type:", net_type)

        if augmenter_type is None:
            augmenter_type = cfg.get("default_augmenter", "imgaug")
            if augmenter_type is None:  # this could be in config.yaml for old projects!
                # updating variable if null/None! (backwards compatibility)
                auxiliaryfunctions.edit_config(config,
                                               {"default_augmenter": "imgaug"})
                augmenter_type = "imgaug"
        elif augmenter_type not in [
                "default",
                "scalecrop",
                "imgaug",
                "tensorpack",
                "deterministic",
        ]:
            raise ValueError("Invalid augmenter type:", augmenter_type)

        if posecfg_template:
            if net_type != prior_cfg["net_type"]:
                print(
                    "WARNING: Specified net_type does not match net_type from posecfg_template path entered. Proceed with caution."
                )
            if augmenter_type != prior_cfg["dataset_type"]:
                print(
                    "WARNING: Specified augmenter_type does not match dataset_type from posecfg_template path entered. Proceed with caution."
                )

        # Loading the encoder (if necessary downloading from TF)
        dlcparent_path = auxiliaryfunctions.get_deeplabcut_path()
        if posecfg_template:
            defaultconfigfile = posecfg_template
        else:
            defaultconfigfile = os.path.join(dlcparent_path, "pose_cfg.yaml")
        model_path, num_shuffles = auxfun_models.Check4weights(
            net_type, Path(dlcparent_path), num_shuffles)

        if Shuffles is None:
            Shuffles = range(1, num_shuffles + 1)
        else:
            Shuffles = [i for i in Shuffles if isinstance(i, int)]

        # print(trainIndices,testIndices, Shuffles, augmenter_type,net_type)
        if trainIndices is None and testIndices is None:
            splits = [(
                trainFraction,
                shuffle,
                SplitTrials(range(len(Data.index)), trainFraction),
            ) for trainFraction in cfg["TrainingFraction"]
                      for shuffle in Shuffles]
        else:
            if not len(trainIndices) == len(testIndices) == len(Shuffles):
                raise ValueError(
                    "Number of Shuffles and train and test indexes should be equal."
                )
            splits = []
            for shuffle, (train_inds, test_inds) in enumerate(
                    zip(trainIndices, testIndices)):
                trainFraction = round(
                    len(train_inds) * 1.0 / (len(train_inds) + len(test_inds)),
                    2)
                print(
                    f"You passed a split with the following fraction: {int(100 * trainFraction)}%"
                )
                # Now that the training fraction is guaranteed to be correct,
                # the values added to pad the indices are removed.
                train_inds = np.asarray(train_inds)
                train_inds = train_inds[train_inds != -1]
                test_inds = np.asarray(test_inds)
                test_inds = test_inds[test_inds != -1]
                splits.append((trainFraction, Shuffles[shuffle], (train_inds,
                                                                  test_inds)))

        bodyparts = cfg["bodyparts"]
        nbodyparts = len(bodyparts)
        for trainFraction, shuffle, (trainIndices, testIndices) in splits:
            if len(trainIndices) > 0:
                if userfeedback:
                    trainposeconfigfile, _, _ = training.return_train_network_path(
                        config,
                        shuffle=shuffle,
                        trainingsetindex=cfg["TrainingFraction"].index(
                            trainFraction),
                    )
                    if trainposeconfigfile.is_file():
                        askuser = input(
                            "The model folder is already present. If you continue, it will overwrite the existing model (split). Do you want to continue?(yes/no): "
                        )
                        if (askuser == "no" or askuser == "No"
                                or askuser == "N" or askuser == "No"):
                            raise Exception(
                                "Use the Shuffles argument as a list to specify a different shuffle index. Check out the help for more details."
                            )

                ####################################################
                # Generating data structure with labeled information & frame metadata (for deep cut)
                ####################################################
                # Make training file!
                (
                    datafilename,
                    metadatafilename,
                ) = auxiliaryfunctions.GetDataandMetaDataFilenames(
                    trainingsetfolder, trainFraction, shuffle, cfg)

                ################################################################################
                # Saving data file (convert to training file for deeper cut (*.mat))
                ################################################################################
                data, MatlabData = format_training_data(
                    Data, trainIndices, nbodyparts, project_path)
                sio.savemat(os.path.join(project_path, datafilename),
                            {"dataset": MatlabData})

                ################################################################################
                # Saving metadata (Pickle file)
                ################################################################################
                auxiliaryfunctions.SaveMetadata(
                    os.path.join(project_path, metadatafilename),
                    data,
                    trainIndices,
                    testIndices,
                    trainFraction,
                )

                ################################################################################
                # Creating file structure for training &
                # Test files as well as pose_yaml files (containing training and testing information)
                #################################################################################
                modelfoldername = auxiliaryfunctions.GetModelFolder(
                    trainFraction, shuffle, cfg)
                auxiliaryfunctions.attempttomakefolder(
                    Path(config).parents[0] / modelfoldername, recursive=True)
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + "/train")
                auxiliaryfunctions.attempttomakefolder(
                    str(Path(config).parents[0] / modelfoldername) + "/test")

                path_train_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "train",
                        "pose_cfg.yaml",
                    ))
                path_test_config = str(
                    os.path.join(
                        cfg["project_path"],
                        Path(modelfoldername),
                        "test",
                        "pose_cfg.yaml",
                    ))
                # str(cfg['proj_path']+'/'+Path(modelfoldername) / 'test'  /  'pose_cfg.yaml')
                items2change = {
                    "dataset": datafilename,
                    "metadataset": metadatafilename,
                    "num_joints": len(bodyparts),
                    "all_joints": [[i] for i in range(len(bodyparts))],
                    "all_joints_names": [str(bpt) for bpt in bodyparts],
                    "init_weights": model_path,
                    "project_path": str(cfg["project_path"]),
                    "net_type": net_type,
                    "dataset_type": augmenter_type,
                }

                items2drop = {}
                if augmenter_type == "scalecrop":
                    # these values are dropped as scalecrop
                    # doesn't have rotation implemented
                    items2drop = {"rotation": 0, "rotratio": 0.0}
                # Also drop maDLC smart cropping augmentation parameters
                for key in [
                        "pre_resize", "crop_size", "max_shift", "crop_sampling"
                ]:
                    items2drop[key] = None

                trainingdata = MakeTrain_pose_yaml(items2change,
                                                   path_train_config,
                                                   defaultconfigfile,
                                                   items2drop)

                keys2save = [
                    "dataset",
                    "num_joints",
                    "all_joints",
                    "all_joints_names",
                    "net_type",
                    "init_weights",
                    "global_scale",
                    "location_refinement",
                    "locref_stdev",
                ]
                MakeTest_pose_yaml(trainingdata, keys2save, path_test_config)
                print(
                    "The training dataset is successfully created. Use the function 'train_network' to start training. Happy training!"
                )

        return splits
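
A sketch of the explicit-split path; the index values are hypothetical, and -1 entries are the padding that the code above strips out:

import deeplabcut

# Two shuffles built from user-defined train/test splits.
deeplabcut.create_training_dataset(
    "/analysis/project/reaching-task/config.yaml",
    Shuffles=[1, 2],
    trainIndices=[[0, 1, 2, 3, 4, 5, 6, 7], [2, 3, 4, 5, 6, 7, 8, 9]],
    testIndices=[[8, 9], [0, 1]],
)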
Example #14
"""
DeepLabCut2.2 Toolbox (deeplabcut.org)
© A. & M. Mathis Labs
https://github.com/DeepLabCut/DeepLabCut

Please see AUTHORS for contributors.
https://github.com/DeepLabCut/DeepLabCut/blob/master/AUTHORS
Licensed under GNU Lesser General Public License v3.0
"""
import os
from deeplabcut.utils.auxiliaryfunctions import (
    read_plainconfig,
    get_deeplabcut_path,
)

dlcparent_path = get_deeplabcut_path()
reid_config = os.path.join(dlcparent_path, "reid_cfg.yaml")
cfg = read_plainconfig(reid_config)
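
A sketch of tweaking and persisting these defaults with write_plainconfig (seen in Example #4); the key name is hypothetical:

from deeplabcut.utils.auxiliaryfunctions import write_plainconfig

cfg["train_epochs"] = 100  # hypothetical key
write_plainconfig(reid_config, cfg)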