Example #1
import os
import ast
import pickle

import h5py
import apt_expts
# apt.read_string/read_entry/create_conf below come from APT's APT_interface
# module.
import APT_interface as apt

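# Hide all GPUs so this setup code runs on the CPU.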
os.environ['CUDA_VISIBLE_DEVICES'] = ''

gt_lbl = None
lbl_file = '/groups/branson/bransonlab/apt/experiments/data/roian_apt_dlstripped.lbl'
# Raw string: the backslash-escaped parens are presumably consumed by a
# downstream shell/regex, so they are kept verbatim.
op_af_graph = r'\(0,1\),\(0,2\),\(0,3\),\(1,2\),\(1,3\),\(2,3\)'

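# APT .lbl project files are HDF5; read the project name and view count.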
lbl = h5py.File(lbl_file, 'r')
proj_name = apt.read_string(lbl['projname'])
nviews = int(apt.read_entry(lbl['cfg']['NumViews']))
lbl.close()
cache_dir = '/nrs/branson/mayank/apt_cache'
all_models = ['openpose']

gpu_model = 'GeForceRTX2080Ti'
sdir = '/groups/branson/home/kabram/bransonlab/APT/deepnet/singularity_stuff'
n_splits = 3

common_conf = {}
common_conf['rrange'] = 10
common_conf['trange'] = 5
common_conf['mdn_use_unet_loss'] = True
common_conf['dl_steps'] = 100000
common_conf['decay_steps'] = 20000
common_conf['save_step'] = 5000
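
# Minimal sketch (hypothetical helper, not part of APT) of how these shared
# settings could be applied to a conf returned by apt.create_conf, mirroring
# the conf.<attr> assignment style used in the examples below:
def apply_common_conf(conf, overrides):
    for key, val in overrides.items():
        setattr(conf, key, val)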
Example #2
def compute_performance(args):
    H = h5py.File(args.lbl_file, 'r')
    nviews = int(apt.read_entry(H['cfg']['NumViews']))
    H.close()
    dir_name = args.whose

    if len(args.nets) == 0:
        all_nets = methods
    else:
        all_nets = args.nets

    all_preds = {}

    for view in range(nviews):
        db_file = os.path.join(out_dir, args.name,
                               args.gt_name) + '_view{}.tfrecords'.format(view)
        conf = apt.create_conf(args.lbl_file,
                               view,
                               name='a',
                               net_type=all_nets[0],
                               cache_dir=os.path.join(out_dir, args.name,
                                                      dir_name))
        conf.labelfile = args.gt_lbl
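        # Rebuild the GT database unless it already exists and regeneration
        # was explicitly skipped.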
        if not (os.path.exists(db_file) and args.skip_gt_db):
            print('Creating GT DB file {}'.format(db_file))
            apt.create_tfrecord(conf,
                                split=False,
                                on_gt=True,
                                db_files=(db_file, ))

    for curm in all_nets:
        all_preds[curm] = []
        for view in range(nviews):
            cur_out = []
            db_file = os.path.join(
                out_dir, args.name,
                args.gt_name) + '_view{}.tfrecords'.format(view)
            if args.split_type is None:
                cachedir = os.path.join(out_dir, args.name, dir_name,
                                        '{}_view_{}'.format(curm,
                                                            view), 'full')
                conf = apt.create_conf(args.lbl_file,
                                       view,
                                       name='a',
                                       net_type=curm,
                                       cache_dir=cachedir)
                model_files, ts = get_model_files(conf, cachedir, curm)
                for mndx, m in enumerate(model_files):
                    out_file = m + '_' + args.gt_name
                    load = False
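                    # unet/deeplabcut checkpoints are multi-file, so the
                    # .index file's mtime stands in for the checkpoint
                    # timestamp; cached .mat results are reused only if newer.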
                    if curm == 'unet' or curm == 'deeplabcut':
                        mm = m + '.index'
                    else:
                        mm = m
                    if os.path.exists(out_file + '.mat') and os.path.getmtime(
                            out_file + '.mat') > os.path.getmtime(mm):
                        load = True

                    if load:
                        H = sio.loadmat(out_file)
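                        # Saved locations are 1-based (MATLAB); shift to
                        # 0-based for Python.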
                        pred = H['pred_locs'] - 1
                        label = H['labeled_locs'] - 1
                        gt_list = H['list'] - 1
                    else:
                        # pred, label, gt_list = apt.classify_gt_data(conf, curm, out_file, m)
                        tf_iterator = multiResData.tf_reader(
                            conf, db_file, False)
                        tf_iterator.batch_size = 1
                        read_fn = tf_iterator.next
                        pred_fn, close_fn, _ = apt.get_pred_fn(curm, conf, m)
                        pred, label, gt_list = apt.classify_db(
                            conf, read_fn, pred_fn, tf_iterator.N)
                        close_fn()
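                        # Shift back to 1-based indexing before saving for
                        # MATLAB consumers.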
                        mat_pred_locs = pred + 1
                        mat_labeled_locs = np.array(label) + 1
                        mat_list = gt_list

                        sio.savemat(
                            out_file, {
                                'pred_locs': mat_pred_locs,
                                'labeled_locs': mat_labeled_locs,
                                'list': mat_list
                            })

                    cur_out.append(
                        [pred, label, gt_list, m, out_file, view, 0, ts[mndx]])

            else:

                for cur_split in range(nsplits):
                    cachedir = os.path.join(out_dir, args.name,
                                            '{}_view_{}'.format(curm, view),
                                            'cv_{}'.format(cur_split))
                    conf = apt.create_conf(args.lbl_file,
                                           view,
                                           name='a',
                                           net_type=curm,
                                           cache_dir=cachedir)
                    model_files, ts = get_model_files(conf, cachedir, curm)
                    db_file = os.path.join(cachedir, 'val_TF.tfrecords')
                    for mndx, m in enumerate(model_files):
                        out_file = m + '.gt_data'
                        load = False
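                        # Same checkpoint-timestamp caching check as the
                        # full-data branch above.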
                        if curm == 'unet' or curm == 'deeplabcut':
                            mm = m + '.index'
                        else:
                            mm = m
                        if os.path.exists(
                                out_file + '.mat') and os.path.getmtime(
                                    out_file + '.mat') > os.path.getmtime(mm):
                            load = True

                        if load:
                            H = sio.loadmat(out_file)
                            pred = H['pred_locs'] - 1
                            label = H['labeled_locs'] - 1
                            gt_list = H['list'] - 1
                        else:
                            tf_iterator = multiResData.tf_reader(
                                conf, db_file, False)
                            tf_iterator.batch_size = 1
                            read_fn = tf_iterator.next
                            pred_fn, close_fn, _ = apt.get_pred_fn(
                                curm, conf, m)
                            pred, label, gt_list = apt.classify_db(
                                conf, read_fn, pred_fn, tf_iterator.N)
                            close_fn()
                            mat_pred_locs = pred + 1
                            mat_labeled_locs = np.array(label) + 1
                            mat_list = gt_list

                            sio.savemat(
                                out_file, {
                                    'pred_locs': mat_pred_locs,
                                    'labeled_locs': mat_labeled_locs,
                                    'list': mat_list
                                })

                        cur_out.append([
                            pred, label, gt_list, m, out_file, view, cur_split,
                            ts[mndx]
                        ])

            all_preds[curm].append(cur_out)

    with open(
            os.path.join(out_dir, args.name, dir_name,
                         args.gt_name + '_results.p'), 'wb') as f:
        pickle.dump(all_preds, f)
Example #3
def train_theirs(args):
    H = h5py.File(args.lbl_file, 'r')
    nviews = int(apt.read_entry(H['cfg']['NumViews']))
    H.close()
    all_nets = args.nets

    for curm in all_nets:
        for view in range(nviews):

            if args.split_type is None:

                cachedir = os.path.join(out_dir, args.name, 'theirs',
                                        '{}_view_{}'.format(curm,
                                                            view), 'full')
                singularity_script = os.path.join(cachedir, 'singularity.sh')
                singularity_logfile = os.path.join(cachedir, 'singularity.log')
                f = open(singularity_script, 'w')
                f.write('#!/bin/bash\n')
                f.write('. /opt/venv/bin/activate\n')

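                # Each net type trains via its own upstream entry point
                # inside the container.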
                if curm == 'unet':
                    f.write('cd {}\n'.format(unet_dir))
                    cmd = 'APT_interface.py -view {} -cache {} -type unet {} train -skip_db'.format(
                        view + 1, cachedir, args.lbl_file)
                    f.write('python {}'.format(cmd))
                elif curm == 'openpose':
                    f.write('cd {}\n'.format(openpose_dir))
                    cmd = 'train_pose.py {} {} {}'.format(
                        args.lbl_file, cachedir, view)
                    f.write('python {}'.format(cmd))
                elif curm == 'leap':
                    f.write('cd {}\n'.format(leap_dir))
                    data_path = os.path.join(cachedir, 'leap_train.h5')
                    cmd = 'leap/training_MK.py {}'.format(data_path)
                    f.write('python {}'.format(cmd))
                elif curm == 'deeplabcut':
                    f.write('cd {}\n'.format(cachedir))
                    cmd = os.path.join(deepcut_dir, 'pose-tensorflow',
                                       'train.py')
                    f.write('python {}'.format(cmd))
                else:
                    raise ValueError('Undefined net type: {}'.format(curm))

                f.close()
                # 0o755 keeps the script readable as well as executable;
                # bare stat.S_IEXEC would drop read permission.
                os.chmod(singularity_script, 0o755)
                cmd = '''ssh 10.36.11.34 '. /misc/lsf/conf/profile.lsf; bsub -oo {}  -n4 -gpu "num=1" -q gpu_any "singularity exec --nv /misc/local/singularity/branson_v2.simg {}"' '''.format(
                    singularity_logfile, singularity_script
                )  # -n4 because we use 4 preprocessing threads
                subprocess.call(cmd, shell=True)
                print('Submitted job: {}'.format(cmd))

            else:

                for cur_split in range(nsplits):
                    cachedir = os.path.join(out_dir, args.name,
                                            '{}_view_{}'.format(curm, view),
                                            'cv_{}'.format(cur_split))
                    singularity_script = os.path.join(cachedir,
                                                      'singularity.sh')
                    singularity_logfile = os.path.join(cachedir,
                                                       'singularity.log')
                    f = open(singularity_script, 'w')
                    f.write('#!/bin/bash\n')
                    f.write('. /opt/venv/bin/activate\n')

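                    # DBs were presumably built already by create_db, so
                    # training skips regenerating them.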
                    args.skip_db = True
                    if curm == 'unet':
                        f.write('cd {}\n'.format(unet_dir))
                        cmd = 'APT_interface_mdn.py {} -view {} -cache {} -type unet train -skip_db'.format(
                            args.lbl_file, view + 1, cachedir)
                        f.write('python {}'.format(cmd))
                    elif curm == 'openpose':
                        f.write('cd {}\n'.format(openpose_dir))
                        cmd = 'train_pose.py {} {} {}'.format(
                            args.lbl_file, cachedir, view)
                        f.write('python {}'.format(cmd))
                    elif curm == 'leap':
                        f.write('cd {}\n'.format(leap_dir))
                        data_path = os.path.join(cachedir, 'leap_train.h5')
                        cmd = 'leap/training_MK.py {}'.format(data_path)
                        f.write('python {}'.format(cmd))
                    elif curm == 'deeplabcut':
                        f.write('cd {}\n'.format(cachedir))
                        cmd = os.path.join(deepcut_dir, 'pose-tensorflow',
                                           'train.py')
                        f.write('python {}'.format(cmd))
                    else:
                        raise ValueError('Undefined net type: {}'.format(curm))

                    f.close()
                    os.chmod(singularity_script, 0o755)
                    cmd = '''ssh 10.36.11.34 '. /misc/lsf/conf/profile.lsf; bsub -oo {}  -n4 -gpu "num=1" -q gpu_tesla "singularity exec --nv /misc/local/singularity/branson_v2.simg {}"' '''.format(
                        singularity_logfile, singularity_script
                    )  # -n4 because we use 4 preprocessing threads
                    subprocess.call(cmd, shell=True)
                    print('Submitted job: {}'.format(cmd))
Example #4
def train_ours(args):
    H = h5py.File(args.lbl_file, 'r')
    nviews = int(apt.read_entry(H['cfg']['NumViews']))
    H.close()
    dir_name = 'ours_default'

    if len(args.nets) == 0:
        all_nets = methods
    else:
        all_nets = args.nets

    for curm in all_nets:
        for view in range(nviews):

            if args.split_type is None:

                cachedir = os.path.join(out_dir, args.name, dir_name,
                                        '{}_view_{}'.format(curm,
                                                            view), 'full')
                singularity_script = os.path.join(cachedir, 'singularity.sh')
                singularity_logfile = os.path.join(cachedir, 'singularity.log')
                f = open(singularity_script, 'w')
                f.write('#!/bin/bash\n')
                f.write('. /opt/venv/bin/activate\n')

                f.write('cd {}\n'.format(unet_dir))
                cmd = 'APT_interface_mdn.py {} -view {} -cache {} -type {} train -skip_db'.format(
                    args.lbl_file, view + 1, cachedir, curm)
                if args.whose == 'ours_default':
                    cmd += ' -use_defaults'
                f.write('python {}'.format(cmd))
                f.close()
                os.chmod(singularity_script, 0o755)
                cmd = '''ssh 10.36.11.34 '. /misc/lsf/conf/profile.lsf; bsub -oo {}  -n4 -gpu "num=1" -q gpu_tesla "singularity exec --nv /misc/local/singularity/branson_v2.simg {}"' '''.format(
                    singularity_logfile, singularity_script
                )  # -n4 because we use 4 preprocessing threads
                subprocess.call(cmd, shell=True)
                print('Submitted job: {}'.format(cmd))

            else:

                for cur_split in range(nsplits):
                    cachedir = os.path.join(out_dir, args.name,
                                            '{}_view_{}'.format(curm, view),
                                            'cv_{}'.format(cur_split))
                    singularity_script = os.path.join(cachedir,
                                                      'singularity.sh')
                    singularity_logfile = os.path.join(cachedir,
                                                       'singularity.log')
                    f = open(singularity_script, 'w')
                    f.write('#!/bin/bash\n')
                    f.write('. /opt/venv/bin/activate\n')

                    f.write('cd {}\n'.format(unet_dir))
                    cmd = 'APT_interface_mdn.py {} -view {} -cache {} -type {} train -skip_db'.format(
                        args.lbl_file, view + 1, cachedir, curm)
                    if args.whose == 'ours_default':
                        cmd += ' -use_defaults'
                    f.write('python {}'.format(cmd))
                    f.close()
                    os.chmod(singularity_script, 0o755)
                    cmd = '''ssh 10.36.11.34 '. /misc/lsf/conf/profile.lsf; bsub -oo {}  -n4 -gpu "num=1" -q gpu_tesla "singularity exec --nv /misc/local/singularity/branson_v2.simg {}"' '''.format(
                        singularity_logfile, singularity_script
                    )  # -n4 because we use 4 preprocessing threads
                    subprocess.call(cmd, shell=True)
                    print('Submitted job: {}'.format(cmd))
Example #5
def create_db(args):
    H = h5py.File(args.lbl_file, 'r')
    nviews = int(apt.read_entry(H['cfg']['NumViews']))
    H.close()
    all_nets = args.nets

    all_split_files = []
    for view in range(nviews):
        if args.split_type is not None and not args.split_type.startswith(
                'prog'):
            cachedir = os.path.join(out_dir, args.name, 'common')
            if not os.path.exists(cachedir):
                os.mkdir(cachedir)
            cachedir = os.path.join(out_dir, args.name, 'common',
                                    'splits_{}'.format(view))
            if not os.path.exists(cachedir):
                os.mkdir(cachedir)
            conf = apt.create_conf(args.lbl_file,
                                   view,
                                   args.name,
                                   cache_dir=cachedir)
            conf.splitType = args.split_type
            print("Split type is {}".format(conf.splitType))
            if args.do_split:
                train_info, val_info, split_files = apt.create_cv_split_files(
                    conf, nsplits)
            else:
                split_files = [
                    os.path.join(conf.cachedir,
                                 'cv_split_fold_{}.json'.format(ndx))
                    for ndx in range(nsplits)
                ]
            all_split_files.append(split_files)

    for curm in all_nets:
        for view in range(nviews):

            if args.split_type is None:

                cachedir = os.path.join(out_dir, args.name, 'common',
                                        '{}_view_{}'.format(curm,
                                                            view), 'full')
                conf = apt.create_conf(args.lbl_file,
                                       view,
                                       args.name,
                                       cache_dir=cachedir)
                if not args.only_check:
                    if not os.path.exists(conf.cachedir):
                        os.makedirs(conf.cachedir)
                    if curm == 'unet' or curm == 'openpose':
                        apt.create_tfrecord(conf, False)
                    elif curm == 'leap':
                        apt.create_leap_db(conf, False)
                    elif curm == 'deeplabcut':
                        apt.create_deepcut_db(conf, False)
                        create_deepcut_cfg(conf)
                    else:
                        raise ValueError('Undefined net type: {}'.format(curm))

                check_db(curm, conf)
            elif args.split_type.startswith('prog'):
                split_type = args.split_type[5:]
                # A conf for this view is needed before computing the
                # progressive splits.
                conf = apt.create_conf(args.lbl_file,
                                       view,
                                       args.name,
                                       cache_dir=os.path.join(
                                           out_dir, args.name, 'common'))
                all_info = get_increasing_splits(conf, split_type)

                for cur_tr in prog_thresholds:
                    cachedir = os.path.join(out_dir, args.name, 'common',
                                            '{}_view_{}'.format(curm, view),
                                            '{}'.format(cur_tr))
                    conf = apt.create_conf(args.lbl_file,
                                           view,
                                           args.name,
                                           cache_dir=cachedir)
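                    # Train on the first len/cur_tr examples; the remainder
                    # becomes the validation set.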
                    split_ndx = round(len(all_info) / cur_tr)
                    cur_train = all_info[:split_ndx]
                    cur_val = all_info[split_ndx:]
                    split_file = os.path.join(cachedir, 'splitdata.json')
                    with open(split_file, 'w') as f:
                        json.dump([cur_train, cur_val], f)
                    if not args.only_check:
                        if curm == 'unet' or curm == 'openpose':
                            apt.create_tfrecord(conf, True, split_file)
                        elif curm == 'leap':
                            apt.create_leap_db(conf, True, split_file)
                        elif curm == 'deeplabcut':
                            apt.create_deepcut_db(conf, True, split_file)
                            create_deepcut_cfg(conf)
                        else:
                            raise ValueError(
                                'Undefined net type: {}'.format(curm))
                    check_db(curm, conf)

            else:

                split_files = all_split_files[view]

                for cur_split in range(nsplits):
                    cachedir = os.path.join(
                        out_dir, args.name, 'common',
                        '{}_view_{}'.format(curm, view))
                    if not os.path.exists(cachedir):
                        os.mkdir(cachedir)
                    cachedir = os.path.join(cachedir,
                                            'cv_{}'.format(cur_split))
                    if not os.path.exists(cachedir):
                        os.mkdir(cachedir)
                    # Fresh conf per net/view/split rather than mutating a
                    # conf carried over from the split-file loop above.
                    conf = apt.create_conf(args.lbl_file,
                                           view,
                                           args.name,
                                           cache_dir=cachedir)
                    conf.splitType = 'predefined'
                    split_file = split_files[cur_split]
                    if not args.only_check:
                        if curm == 'unet' or curm == 'openpose':
                            apt.create_tfrecord(conf, True, split_file)
                        elif curm == 'leap':
                            apt.create_leap_db(conf, True, split_file)
                        elif curm == 'deeplabcut':
                            apt.create_deepcut_db(conf, True, split_file)
                            create_deepcut_cfg(conf)
                        else:
                            raise ValueError(
                                'Undefined net type: {}'.format(curm))
                    check_db(curm, conf)

    # Mirror the common DBs into each condition's directory as symlink
    # trees (cp -rs), once, after all nets and views are built.
    base_dir = os.path.join(out_dir, args.name, 'common')
    their_dir = os.path.join(out_dir, args.name, 'theirs')
    our_dir = os.path.join(out_dir, args.name, 'ours')
    our_default_dir = os.path.join(out_dir, args.name, 'ours_default')
    cmd = 'cp -rs {} {}'.format(base_dir, their_dir)
    os.system(cmd)
    cmd = 'cp -rs {} {}'.format(base_dir, our_dir)
    os.system(cmd)
    cmd = 'cp -rs {} {}'.format(base_dir, our_default_dir)
    os.system(cmd)