Code Example #1
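Setup portion of an inference script: it resolves the path to a trained model from its basename (bn), loads the PoseDetector weights onto the chosen device, and collects the masked-video HDF5 files to be processed.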
 
# Standard-library and third-party imports needed by this snippet.
import random
from pathlib import Path

import torch

# PoseDetector, get_device and get_model_arguments come from the worm-poses project,
# and bn (the trained-model basename) is defined earlier in the original script.

set_type = bn.partition('_')[0]
model_path = Path.home() / 'workspace/WormData/worm-poses/results' / set_type / bn / 'model_best.pth.tar'


cuda_id = 0
device = get_device(cuda_id)
batch_size = 64
roi_size = 256

images_queue_size = 2
results_queue_size = 4

model_args = get_model_arguments(bn)
#%%
# Restore the trained weights and move the model to the selected device for inference.
model = PoseDetector(**model_args)
state = torch.load(model_path, map_location='cpu')
model.load_state_dict(state['state_dict'])
model.eval()
model = model.to(device)

#%%
root_dir = Path.home() / 'workspace/WormData/screenings/Bertie_movies'
#root_dir = Path('/Users/avelinojaver/OneDrive - Nexus365/worms/Bertie_movies')

mask_dir = root_dir / 'MaskedVideos'
save_root_dir = root_dir / ('ResultsNN_' + bn)

# Collect the masked-video files to process and shuffle their order.
#mask_files = list(mask_dir.rglob('JU2565_Ch2_27082017_112328*.hdf5'))
mask_files = list(mask_dir.rglob('*.hdf5'))
random.shuffle(mask_files)
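The listing stops before the actual processing loop (the images_queue_size and results_queue_size variables suggest the original script hands frames to worker queues). A minimal sketch of running the loaded model on a single frame might look as follows; the '/mask' dataset name, the use of pytables, and the single-tensor call to PoseDetector are assumptions, not the original pipeline:

import tables

# Read the first frame of one masked video (dataset name assumed to be '/mask').
with tables.open_file(str(mask_files[0]), 'r') as fid:
    frame = fid.get_node('/mask')[0]

# Normalise to [0, 1] and add batch and channel dimensions -> (1, 1, H, W).
X = torch.from_numpy(frame[None, None].astype('float32') / 255.)

with torch.no_grad():
    predictions = model(X.to(device))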
Code Example #2
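Truncated excerpt: it picks the PoseDetector arguments for a VGG11- or VGG19-based variant depending on the model basename (bn), adds an extra segment when the name contains '+head', loads the trained weights, and keeps several commented-out lists of example video paths.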
            features_type='vgg11',
            n_stages=4,
        )
    else:
        model_args = dict(
            n_segments=25,
            n_affinity_maps=21,
            features_type='vgg19',
            n_stages=6,
        )

    if '+head' in bn:
        model_args['n_segments'] += 1

    #%%
    model = PoseDetector(**model_args, return_belive_maps=True)

    model.load_state_dict(state['state_dict'])
    model.eval()
    #%%
    # fnames = ['/Users/avelinojaver/workspace/WormData/screenings/pesticides_adam/Syngenta/MaskedVideos/test_SYN_001_Agar_Screening_310317/N2_worms10_food1-3_Set2_Pos4_Ch5_31032017_220113.hdf5',
    #           '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/aggregation/N2_1_Ch1_29062017_182108_comp3.hdf5'
    #           ]

    # fnames = [Path.home() / 'workspace/WormData/screenings/mating_videos/Mating_Assay/Mating_Assay_030718/MaskedVideos/Set1_CB369_CB1490/Set1_CB369_CB1490_Ch1_03072018_163429.hdf5',
    #          Path.home() / 'workspace/WormData/screenings/mating_videos/wildMating/MaskedVideos/20180819_wildMating/wildMating5.2_MY23_self_CB4856_self_PC2_Ch1_19082018_123257.hdf5',
    #          Path.home() / 'workspace/WormData/screenings/mating_videos/wildMating/MaskedVideos/20180818_wildMating/wildMating4.2_CB4856_self_CB4856_self_PC3_Ch2_18082018_124339.hdf5'
    #          ]

    # fnames = ['/Users/avelinojaver/workspace/WormData/screenings/pesticides_adam/Syngenta/MaskedVideos/test_SYN_001_Agar_Screening_310317/N2_worms10_food1-3_Set2_Pos4_Ch5_31032017_220113.hdf5',
    #           '/Users/avelinojaver/OneDrive - Imperial College London/tierpsy_examples/aggregation/N2_1_Ch1_29062017_182108_comp3.hdf5'
Code Example #3
File: train_PAF.py Project: ver228/worm-poses
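Training entry point: it builds the training and validation data flows, instantiates either an OpenPose-style PoseDetector or a Keypoint R-CNN model, optionally restores weights from init_model_path, and hands everything to train_poses.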
def train_PAF(
        data_type='v2',
        model_name='openpose',
        cuda_id=0,
        log_dir_root=log_dir_root_dflt,
        batch_size=16,
        num_workers=1,
        loss_type='maxlikelihood',
        lr=1e-4,
        weight_decay=0.0,
        n_epochs=1,  #1000,
        save_frequency=200,
        init_model_path=None):

    d_args = data_types[data_type]
    flow_args = d_args['flow_args']
    root_dir = d_args['root_dir']

    log_dir = log_dir_root / data_type

    return_bboxes = False
    return_half_bboxes = False
    if 'openpose' not in model_name:
        if 'halfboxes' in data_type:
            return_half_bboxes = True
        else:
            return_bboxes = True

    model_args = available_models[model_name]
    train_flow = SkelMapsFlow(
        root_dir=root_dir,
        set2read='train',
        #set2read = 'validation',
        #samples_per_epoch = 1000,
        return_key_value_pairs=True,
        PAF_seg_dist=model_args['PAF_seg_dist'],
        n_segments=model_args['n_segments'],
        fold_skeleton=model_args['fold_skeleton'],
        return_bboxes=return_bboxes,
        return_half_bboxes=return_half_bboxes,
        **flow_args)

    val_flow = SkelMapsFlowValidation(
        root_dir=root_dir,
        set2read='validation',
        return_key_value_pairs=True,
        PAF_seg_dist=model_args['PAF_seg_dist'],
        n_segments=model_args['n_segments'],
        fold_skeleton=model_args['fold_skeleton'],
        return_bboxes=return_bboxes,
        return_half_bboxes=return_half_bboxes,
        **flow_args)

    if 'openpose' in model_name:
        model = PoseDetector(n_segments=train_flow.n_segments_out,
                             n_affinity_maps=train_flow.n_affinity_maps_out,
                             n_stages=model_args['n_stages'],
                             features_type=model_args['features_type'],
                             use_head_loss=model_args['use_head_loss'],
                             pose_loss_type=model_args['pose_loss_type'])
    else:
        model = get_keypointrcnn(backbone=model_args['backbone'],
                                 num_classes=2,
                                 num_keypoints=train_flow.n_segments_out)

    if init_model_path is not None:
        model_name = 'R+' + model_name
        state = torch.load(init_model_path, map_location='cpu')
        model.load_state_dict(state['state_dict'])

    device = get_device(cuda_id)
    lr_scheduler = None

    model_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = torch.optim.Adam(model_params,
                                 lr=lr,
                                 weight_decay=weight_decay)

    now = datetime.datetime.now()
    date_str = now.strftime('%Y%m%d_%H%M%S')

    basename = f'{data_type}_{model_name}_{loss_type}_{date_str}_adam_lr{lr}_wd{weight_decay}_batch{batch_size}'

    train_poses(basename,
                model,
                device,
                train_flow,
                val_flow,
                optimizer,
                log_dir,
                lr_scheduler=lr_scheduler,
                batch_size=batch_size,
                n_epochs=n_epochs,
                num_workers=num_workers,
                save_frequency=save_frequency)
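
For reference, a minimal way to call this function directly, with illustrative argument values (the original repository may expose it through a command-line wrapper instead):

if __name__ == '__main__':
    train_PAF(
        data_type='v2',
        model_name='openpose',
        cuda_id=0,
        batch_size=16,
        num_workers=4,
        n_epochs=1000,
    )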
Code Example #4
File: from_images.py Project: ver228/worm-poses
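Truncated excerpt: like Example #2 it assembles the PoseDetector arguments (here with explicit keypoint_max_dist and nms_min_distance) and loads the trained weights, then reads a handful of BBBC010 *w2*.tif images and normalises each one before the detection step, which is elided in this listing.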
        )

    else:
        model_args = dict(
            n_segments=25,
            n_affinity_maps=21,
            features_type='vgg19',
            n_stages=6,
        )

    if '+head' in bn:
        model_args['n_segments'] += 1

    #%%
    model = PoseDetector(**model_args,
                         return_belive_maps=True,
                         keypoint_max_dist=20,
                         nms_min_distance=5)

    model.load_state_dict(state['state_dict'])
    model.eval()
    #%%
    root_dir = Path('/Users/avelinojaver/Downloads/BBBC010_v1_images/')

    fnames = list(root_dir.glob('*w2*.tif'))[:10]
    for ifname, fname in enumerate(tqdm.tqdm(fnames)):

        img = cv2.imread(str(fname), -1)
        #img = img.astype(np.float32)/4095.
        img = img.astype(np.float32) / img.max()
        #img = cv2.blur(img, ksize = (5,5))