Code example #1
File: prepare_models.py  Project: RonaldGalea/imATFIB
def prepare(model_ids, dataset_name):
    """
    Args:
    model_ids: list of str - ids of models for inference
    dataset_name: str - will construct validation loader of the dataset

    ! Important:
    models are expected to have same settings regarding dataset and training (eg roi, input res)

    Returns:
    loaded models ready for inference in a list
    validation_loader
    params
    """
    models, params_list, configs_list = [], [], []
    for id in model_ids:
        params = Params(constants.params_path.format(dataset_name, id))
        config = Params(constants.config_path.format(dataset_name, id))
        validate_params(params)
        print("Constructing model: ", id)
        model = training_setup.model_setup(params, config)
        # in this case, model id is equal to exp name
        training_setup.load_model_weights(model, dataset_name, id)
        model.eval()
        prints.print_trained_parameters_count(model)
        models.append(model)
        params_list.append(params)
        configs_list.append(config)

    if len(model_ids) == 1:
        return models[0], params_list[0], configs_list[0]
    return models, params_list, configs_list
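
A minimal usage sketch for prepare(); the experiment ids and dataset name below are hypothetical placeholders, since the real ones live in the project's constants and config files:

# Hypothetical usage sketch: the ids and dataset name are placeholders,
# not taken from the original project.
models, params_list, configs_list = prepare(
    model_ids=["unet_baseline", "unet_roi"],   # assumed experiment names
    dataset_name="imatfib")                    # assumed dataset id
for model, params in zip(models, params_list):
    print(type(model).__name__)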
Code example #2
def test_sampler():

    dt1 = EvMaskPoseDataset(1, Params())
    dt2 = EvMaskPoseDataset(2, Params())
    dt3 = EvMaskPoseDataset(3, Params())
    dt4 = EvMaskPoseDataset(4, Params())
    cdt = ConcatDataset([dt1, dt2, dt3, dt4])
    custom_sampler = ConcatDataSampler(cdt, 4, True)

    assert len(custom_sampler) == len(cdt) // 4
    for n in custom_sampler:
        assert n is not None
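
For context, a self-contained sketch of the pattern the test exercises, batching over a ConcatDataset; torch's stock BatchSampler stands in for the project-specific ConcatDataSampler:

# Stand-in sketch: BatchSampler replaces the project's ConcatDataSampler.
from torch.utils.data import BatchSampler, ConcatDataset, Dataset, RandomSampler

class ToyDataset(Dataset):
    def __init__(self, n):
        self.items = list(range(n))
    def __len__(self):
        return len(self.items)
    def __getitem__(self, idx):
        return self.items[idx]

cdt = ConcatDataset([ToyDataset(6), ToyDataset(6)])
sampler = BatchSampler(RandomSampler(cdt), batch_size=4, drop_last=True)
assert len(sampler) == len(cdt) // 4  # mirrors the assertion in test_sampler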
Code example #3
def save_predictions_test(predicted_volumes, model_id, dataset_name):
    """
    list of ndarrays representing volume predictions
    """
    config = Params(constants.config_path.format(dataset_name, model_id))
    params = Params(constants.params_path.format(dataset_name, model_id))
    test_dataloader = training_setup.prepare_test_loader(params, config)

    for batch_idx, (volume, orig_shape, header_info, img_path) in enumerate(test_dataloader):
        print("Volume shape: ", volume.shape, " and original shape: ", orig_shape)
        processed_volume = predicted_volumes[batch_idx]
        save_volume(processed_volume, header_info, batch_idx, img_path=img_path)
        print("Volume number ", batch_idx+1, " saved successfully!")
Code example #4
def save_predictions(predicted_volumes, model_id, dataset_name):
    """
    list of ndarrays representing volume predictions
    """
    config = Params(constants.config_path.format(dataset_name, model_id))
    params = Params(constants.params_path.format(dataset_name, model_id))
    validation_dataloader = training_setup.prepare_val_loader(params, config)

    for batch_idx, (_, mask, _, header_info) in enumerate(validation_dataloader):
        mask = mask.numpy().astype(np.uint8)
        processed_volume = predicted_volumes[batch_idx]
        save_volume(processed_volume, header_info, batch_idx)
        save_volume(mask, header_info, batch_idx, type="gt")

        print("Volume number ", batch_idx+1, " saved successfully!")
Code example #5
File: tester.py  Project: hsfzxjy/WRSFKM
    def target(self, index):

        from init import init_uv
        from iterations import run

        X, C, labels = load_dataset(self.dataset)

        U, V = init_uv(X, C, Params(dict(**self.init_params, **self.mutual)))

        initial = {name: (U.copy(), V.copy()) for name in self.params}

        result = {}

        for name, param in self.params.items():
            p = Params({
                **param,
                **self.mutual, 'initial': initial[name],
                'init': 'preset',
                'C': C
            })

            dest = os.path.join(self.root_directory, name + '.h5.' +
                                str(index)) if self.root_directory else ''

            print('running', name)
            logger = Logger(dest)
            start_time = time()
            result = run(X, labels, p, logger)
            end_time = time()
            time_elapsed = end_time - start_time
            result = (*result, time_elapsed)
            print(name, result[2:])
            logger.log_final(*result)
            logger.close()

        return result
Code example #6
def data():

    from init import init_uv
    from utils.datasets import load_dataset
    from utils.params import Params

    X, C, labels = load_dataset('mnist_10k')

    return (
        X,
        *init_uv(
            X, C,
            Params({
                'method': 'random'
            })
        ),
        labels
    )
Code example #7
    def __init__(self, data_dir, params):
        """
        Loads dataset_params, vocabulary and tags. Ensure you have run
        `build_vocab.py` on data_dir before using this class.

        Parameters
        ----------
        data_dir: (string) directory containing the dataset
        params: (Params) hyperparameters of the training process. This function
            modifies params and appends dataset_params (such as vocab_size,
            num_of_tags etc.) to params.
        """

        # loading dataset_params
        json_path = os.path.join(data_dir, 'dataset_params.json')
        if not os.path.isfile(json_path):
            raise ValueError(
                "No json file found at {}, run build_vocab.py".format(
                    json_path))
        self.dataset_params = Params(json_path)

        # loading vocab (we require this to map words to their indices)
        vocab_path = os.path.join(data_dir, 'words.txt')
        self.vocab = {}
        with open(vocab_path) as f:
            for i, l in enumerate(f.read().splitlines()):
                self.vocab[l] = i

        # setting the indices for UNKnown words and PADding symbols
        self.unk_ind = self.vocab[self.dataset_params.unk_word]
        self.pad_ind = self.vocab[self.dataset_params.pad_word]

        # loading tags (we require this to map tags to their indices)
        tags_path = os.path.join(data_dir, 'tags.txt')
        self.tag_map = {}
        with open(tags_path) as f:
            for i, t in enumerate(f.read().splitlines()):
                self.tag_map[t] = i

        # adding dataset parameters to params (e.g. vocab_size, num_of_tags)
        params.update(json_path)
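
The reason unk_ind and pad_ind are cached is so lookups can map out-of-vocabulary words and padding to fixed indices. A minimal, self-contained illustration (tokens invented for the example):

# Toy vocab lookup with an UNK fallback, mirroring how self.vocab,
# self.unk_ind and self.pad_ind are used downstream.
vocab = {"<pad>": 0, "<unk>": 1, "the": 2, "cat": 3}
unk_ind = vocab["<unk>"]
sentence = ["the", "dog"]  # "dog" is out of vocabulary
indices = [vocab.get(w, unk_ind) for w in sentence]
assert indices == [2, 1]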
Code example #8
File: train.py  Project: feinanshan/Motion-Guided-CRN
def train_CRN_Net(args):
    cfg = Params(os.path.join("../config", args.config))
    # set up params
    cfg.ctx = [int(i) for i in args.gpu.split(',')]
    if len(cfg.ctx) > 1:
        raise Exception('Only for 1-GPU mode')

    cfg_attr = vars(cfg)
    cfg_attr.update(vars(args))

    # set up data loader
    trainIter = dataIter(cfg)

    # set up model
    if cfg.backbone == 'CRN_Res101':
        model = CRN(mtype=101, num_classes=1)
    elif cfg.backbone == 'CRN_Res50':
        model = CRN(mtype=50, num_classes=1)
    else:
        raise Exception('Unknown backbone: ' + cfg.backbone)

    if args.resume:
        mdl_dir = args.model_dir
        pre_pfx = args.pretrained_prefix
        pre_epc = args.pretrained_epoch
        net_pfx = cfg.network
        saved_state_dict = torch.load(
            os.path.join(mdl_dir, pre_pfx, net_pfx + '_' + pre_epc + '.pth'))
        model.load_state_dict(saved_state_dict)

    if cfg.use_global_stats:
        model.eval()  # eval mode: use global (running) batch-norm statistics

    if len(cfg.ctx) > 0:
        model.cuda(cfg.ctx[0])

    # set up optimizer
    if cfg.optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(),
                              lr=cfg.learning_rate,
                              momentum=cfg.momentum,
                              weight_decay=cfg.wd)
    elif cfg.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=cfg.learning_rate,
                               weight_decay=cfg.wd)
    else:
        raise Exception('optimizer must be SGD or Adam')

    optimizer.zero_grad()

    # set up model path
    model_path = os.path.join(args.model_dir, cfg.prefix)
    if not os.path.isdir(model_path):
        os.mkdir(model_path)
    model_full_path = os.path.join(model_path,
                                   datetime.now().strftime('%Y_%m_%d_%H_%M'))
    if not os.path.isdir(model_full_path):
        os.mkdir(model_full_path)

    # set up log
    util.save_log(cfg.prefix, model_full_path)
    logging.info(
        '---------------------------TIME-------------------------------')
    logging.info('-------------------{}------------------------'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
    for k, v in sorted(cfg_attr.items(), key=lambda x: x[0]):
        logging.info("%s : %s", k, v)

    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    # training phase
    for epoch in range(cfg.begin_epoch, cfg.end_epoch):
        trainIter.reset()
        totalLoss = 0.0
        tic = time.time()
        for iter in tqdm(range(trainIter.iter_cnt)):

            img, msk_16, msk_32, gt0, gt1, gt2, gt3, gt4, gt5 = trainIter.next()

            out = model([img, msk_32])
            loss0 = Loss_calc(out[0], gt0)
            loss1 = Loss_calc(out[1], gt1)
            loss2 = Loss_calc(out[2], gt2)
            loss3 = Loss_calc(out[3], gt3)
            loss4 = Loss_calc(out[4], gt4)
            loss5 = Loss_calc(out[5], gt5)

            loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5

            loss.backward()
            if iter % cfg.updateIter == 0:
                optimizer.step()
                optimizer.zero_grad()
            totalLoss += loss.data.cpu().numpy() / trainIter.iter_cnt

        logger.info('Epoch[%d] Train-Loss=%.5f', epoch, totalLoss)
        toc = time.time()
        logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))

        if epoch % cfg.frequence == 0:
            snapshot_name = cfg.network + '_' + str(epoch) + '.pth'
            print('taking snapshot: ' + snapshot_name)
            torch.save(model.cpu().state_dict(),
                       os.path.join(model_full_path, snapshot_name))
            model.cuda(cfg.ctx[0])
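
The iter % cfg.updateIter guard above implements gradient accumulation: backward() adds into the .grad buffers, and the optimizer only steps every few batches. A minimal, self-contained sketch of the same pattern on dummy data:

# Gradient accumulation sketch: sum gradients over `update_every` batches,
# then apply one optimizer step.
import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
update_every = 4

optimizer.zero_grad()
for i in range(16):
    x, y = torch.randn(8, 4), torch.randn(8, 1)
    loss = torch.nn.functional.mse_loss(model(x), y)
    loss.backward()  # accumulates into .grad
    if i % update_every == 0:
        optimizer.step()
        optimizer.zero_grad()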
Code example #9
def main():
    start_time = time.strftime("%d%m%y_%H%M%S")
    parser = argparse.ArgumentParser()
    parser.add_argument("model_name",
                        type=str,
                        help="Pass name of model as defined in hparams.yaml.")
    args = parser.parse_args()
    # Parse our YAML file which has our model parameters.
    params = Params("hparams.yaml", args.model_name)
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = params.gpu_vis_dev
    # Check if a GPU is available and use it if so.
    use_gpu = torch.cuda.is_available()
    device = torch.device("cuda" if use_gpu else "cpu")

    # Load model that has been chosen via the command line arguments.
    model_module = __import__('.'.join(['models', params.model_name]),
                              fromlist=['object'])
    model = model_module.net(**params.net_args)
    # Send the model to the chosen device.
    # To use multiple GPUs
    # model = nn.DataParallel(model)
    model.to(device)
    # Grab your training and validation functions for your network.
    train = model_module.train
    val = model_module.val

    optimizer = optim.Adam(model.parameters(), lr=params.lr)

    # This is useful if you have multiple custom datasets defined.
    Dataset = getattr(Datasets, params.dataset_class)

    augments_train = getattr(model_augments, params.augments_train)()
    augments_val = getattr(model_augments, params.augments_val)()

    train_data = Dataset(params.data_dir + "/train",
                         augmentations=augments_train)
    val_data = Dataset(params.data_dir + "/val", augmentations=augments_val)

    train_loader = DataLoader(train_data,
                              batch_size=params.batch_size,
                              shuffle=True)
    val_loader = DataLoader(val_data,
                            batch_size=params.batch_size,
                            shuffle=False)
    os.makedirs(params.log_dir, exist_ok=True)
    os.makedirs(params.checkpoint_dir, exist_ok=True)
    os.makedirs(params.data_dir, exist_ok=True)
    os.makedirs("figs", exist_ok=True)

    val_roc_aucs = []
    val_losses = []
    train_losses = []
    train_roc_aucs = []
    for epoch in range(1, params.num_epochs + 1):
        print("Epoch: {}".format(epoch))
        # Call training function.
        train(model, device, train_loader, optimizer)
        # Evaluate on both the training and validation set.
        train_loss, train_roc_auc = val(model, device, train_loader)
        val_loss, val_roc_auc = val(model, device, val_loader)
        # Collect some data for logging purposes.
        train_losses.append(float(train_loss))
        train_roc_aucs.append(train_roc_auc)
        val_losses.append(float(val_loss))
        val_roc_aucs.append(val_roc_auc)

        print(
            '\n\ttrain Loss: {:.6f}\ttrain roc_auc: {:.6f} \n\tval Loss: {:.6f}\tval roc_auc: {:.6f}'
            .format(train_loss, train_roc_auc, val_loss, val_roc_auc))
        # Here is a simple plot for monitoring training.
        # The figure is re-created each epoch.
        fig = plot_training(train_losses, train_roc_aucs, val_losses,
                            val_roc_aucs)
        fig.savefig(
            os.path.join("figs", "{}_training_vis".format(args.model_name)))
        # Save model every few epochs (or even more often if you have the disk space).
        if epoch % 5 == 0:
            torch.save(
                model.state_dict(),
                os.path.join(
                    params.checkpoint_dir,
                    "checkpoint_{}_epoch_{}".format(args.model_name, epoch)))
    # Some log information to help you keep track of your model information.
    logs = {
        "model": args.model_name,
        "net_args": params.net_args,
        "train_losses": train_losses,
        "train_roc_aucs": train_roc_aucs,
        "val_losses": val_losses,
        "val_roc_aucs": val_roc_aucs,
        "best_val_epoch": int(np.argmax(val_roc_aucs) + 1),
        "model": args.model_name,
        "lr": params.lr,
        "batch_size": params.batch_size,
        "augments_train": str(augments_train),
        "augments_val": str(augments_val)
    }

    with open(
            os.path.join(params.log_dir,
                         "{}_{}.json".format(args.model_name, start_time)),
            'w') as f:
        json.dump(logs, f)
Code example #10
File: run.py  Project: LittleBoyHacker/diasenti
        save_model(model, params, performance_str)

        return performance_dict


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description='running experiments on multimodal datasets.')
    parser.add_argument('-config',
                        action='store',
                        dest='config_file',
                        help='please enter configuration file.',
                        default='config/run.ini')
    args = parser.parse_args()
    params = Params()
    params.parse_config(args.config_file)
    params.config_file = args.config_file
    mode = 'run'
    if 'mode' in params.__dict__:
        mode = params.mode
    set_seed(params)

    params.device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    if mode == 'run':
        results = []
        reader = setup(params)
        reader.read(params)
        print(params.output_dim_emo)
Code example #11
File: train.py  Project: feinanshan/Motion-Guided-CRN
def train_Tube_Net(args):
	cfg = Params(os.path.join("../config",args.config))
	# set up params
	cfg.ctx = [int(i) for i in args.gpu.split(',')]
	if len(cfg.ctx) > 1:
		raise Exception('Only for 1-GPU mode')


	cfg_attr = vars(cfg)
	cfg_attr.update(vars(args))

	# set up model path
	model_path = os.path.join(args.model_dir, cfg.prefix)
	if not os.path.isdir(model_path):
		os.mkdir(model_path)
	model_full_path = os.path.join(
		model_path, datetime.now().strftime('%Y_%m_%d_%H_%M'))
	if not os.path.isdir(model_full_path):
		os.mkdir(model_full_path)


	# set up log
	util.save_log(cfg.prefix, model_full_path)
	logging.info(
		'---------------------------TIME-------------------------------')
	logging.info('-------------------{}------------------------'.format(
		datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
	for k, v in sorted(cfg_attr.items(), key=lambda x: x[0]):
		logging.info("%s : %s", k, v)

	logger = logging.getLogger()
	logger.setLevel(logging.INFO)

	name_list, f1_img, f1_gt, img_list, label_list = read_vos_test_list(cfg)
	mIoU = 0.0
	MAE = 0.0
	seq_num = len(name_list)


	for seq_i in range(seq_num):

		seq = name_list[seq_i]
		f1_i = f1_img[seq_i]
		f1_g = f1_gt[seq_i]
		imgs = img_list[seq_i]
		labels =  label_list[seq_i]


		# set up model
		if cfg.backbone == 'CRN_Res101':
			model = CRN(mtype=101, num_classes=1)
		elif cfg.backbone == 'CRN_Res50':
			model = CRN(mtype=50, num_classes=1)
		else:
			raise Exception('Unknown backbone: ' + cfg.backbone)

		if args.resume==1:		
			print('load model:'+args.trained_path)
			logging.info('load model:'+args.trained_path)
			saved_state_dict = torch.load(args.trained_path)
			model.load_state_dict(saved_state_dict)

		if cfg.use_global_stats:
			model.eval()  # use_global_stats = True

		if len(cfg.ctx)>0:
			model.cuda(cfg.ctx[0])

		# set up optimizer
		if cfg.optimizer == 'SGD':
			optimizer = optim.SGD(model.parameters(),lr = cfg.learning_rate, 
													momentum = cfg.momentum, weight_decay = cfg.wd)#
		elif cfg.optimizer == 'Adam':
			optimizer = optim.Adam(model.parameters(),lr = cfg.learning_rate, weight_decay = cfg.wd)
		else:
			raise Exception('optimizer must be SGD or Adam')

		optimizer.zero_grad()

		totalLoss = 0.0
		tic = time.time()

		logger.info('Finetuning on the set- %s', seq)

		for epoch in tqdm(range(cfg.finetune_epoch)):
			img,msk_16,msk_32,gt0,gt1,gt2,gt3,gt4,gt5= get_training_batch(cfg,f1_i,f1_g)
			
			out = model([img,msk_32])
			loss0 = Loss_calc(out[0],gt0)
			loss1 = Loss_calc(out[1],gt1)
			loss2 = Loss_calc(out[2],gt2)
			loss3 = Loss_calc(out[3],gt3)
			loss4 = Loss_calc(out[4],gt4)
			loss5 = Loss_calc(out[5],gt5)
			loss = loss0 + loss1 + loss2 + loss3 + loss4 + loss5
			loss.backward()
			optimizer.step()
			optimizer.zero_grad()
			totalLoss += loss.data.cpu().numpy()/cfg.finetune_epoch

		model.eval()
		torch.save(model.cpu().state_dict(),os.path.join(model_full_path,seq+'_iter'+str(cfg.finetune_epoch)+'.pth'))
		model.cuda(cfg.ctx[0])

		toc = time.time()
		logger.info('Train-Loss=%.5f, Time cost=%.3f', totalLoss,(toc - tic))

		iouu = 0.0
		mae = 0.0

		img_ = imgs[0]
		label_ = labels[0]

		img_test,lbl_test,mask_test=get_testing_batch(cfg,img_,label_)

		mask__ = lbl_test[0,0].cpu().data.numpy()

		for test_ in range(len(imgs)):
			model.eval()
			torch.cuda.empty_cache()
			img_ = imgs[test_]
			label_ = labels[test_]

			img_test,lbl_test,mask_test=get_testing_batch(cfg,img_,label_)

			kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
			mask__ = cv2.dilate(mask__, kernel)

			mask_temp = mask__ > 0
			msk_atn0 = cv2.resize(mask_temp.astype(np.float32), (256, 256), interpolation=cv2.INTER_NEAREST)
			msk_atn0 = Variable(torch.from_numpy(msk_atn0).float().view(1, 1, 256, 256)).cuda(cfg.ctx[0])
			msk_atn1 = cv2.resize(mask_temp.astype(np.float32), (128, 128), interpolation=cv2.INTER_NEAREST)
			msk_atn1 = Variable(torch.from_numpy(msk_atn1).float().view(1, 1, 128, 128)).cuda(cfg.ctx[0])
			msk_atn2 = cv2.resize(mask_temp.astype(np.float32), (64, 64), interpolation=cv2.INTER_NEAREST)
			msk_atn2 = Variable(torch.from_numpy(msk_atn2).float().view(1, 1, 64, 64)).cuda(cfg.ctx[0])
			msk_atn3 = cv2.resize(mask_temp.astype(np.float32), (32, 32), interpolation=cv2.INTER_NEAREST)
			msk_atn3 = Variable(torch.from_numpy(msk_atn3).float().view(1, 1, 32, 32)).cuda(cfg.ctx[0])
			msk_atn4 = cv2.resize(mask_temp.astype(np.float32), (16, 16), interpolation=cv2.INTER_NEAREST)
			msk_atn4 = Variable(torch.from_numpy(msk_atn4).float().view(1, 1, 16, 16)).cuda(cfg.ctx[0])

			mask_ = cv2.resize(mask__.astype(np.float32), (16, 16))
			mask_t = Variable(torch.from_numpy(mask_).float().view(1, 16, 16))

			out = model([img_test, mask_t.cuda(cfg.ctx[0])])
			mask_out = (out[0][0, 0].cpu().data.numpy() > 0.5).astype(np.float32)

			kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (64, 64))
			mask__ = cv2.dilate(mask__, kernel)

			mask__ = mask_out * mask__

			lbl_test = (lbl_test).cpu().data.numpy()

			#cv2.imshow("mask__",mask__)  
			#cv2.waitKey (50) 

			iouu += compute_iou_for_binary_segmentation(mask__>0.5,lbl_test[:,:]>0.5)
			mae += (np.abs(mask__-lbl_test[:,:])).sum()
		mIoU += iouu/len(imgs)
		MAE += mae/len(imgs)/(cfg.frame_num*cfg.img_size/8*cfg.img_size/8)

		logger.info('Testing: mIoU-%.5f,  MAE-%.5f', iouu/len(imgs), mae/len(imgs)/(cfg.frame_num*cfg.img_size/8*cfg.img_size/8))
		logger.info('---------------------------------')
	
	logger.info('####################################')
	logger.info('Total Testing: mIoU-%.5f,  MAE-%.5f', mIoU/seq_num, MAE/seq_num)
	logger.info('####################################')
Code example #12
        # results["mean_iou"] = IOULoss().forward(groundtruth, mesh_silhouettes).detach().cpu().numpy().tolist()
        # results["mean_dice"] = DiceCoeffLoss().forward(groundtruth, mesh_silhouettes)

        manager.set_pred_results(results)
        manager.close()


if __name__ == "__main__":

    logging.basicConfig(level=logging.INFO,
                        format="%(levelname)s: %(message)s")

    args = get_args()
    args_dict = vars(args)
    params = Params()
    params.config_file = args_dict['config_file']
    params.__post_init__()
    params._set_with_dict(args_dict)
    params.ransac_iou_threshold = args_dict['ransac_iou_threshold']

    # Set the device
    dev_num = params.gpu_num
    os.environ["CUDA_VISIBLE_DEVICES"] = dev_num
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu")
    logging.info(f"Using {device} as computation device")
    if device == f"cuda":
        torch.cuda.set_device()
    logging.info(f"Using {device} as computation device")
    params.device = device
Code example #13
        {
            "model": model.module.state_dict(),
            "unet_optimizer": unet_optimizer.state_dict(),
            "pose_optimizer": pose_optimizer.state_dict(),
        },
        model_dir,
    )


if __name__ == "__main__":

    logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")

    args = get_args()
    args_dict = vars(args)
    params = Params()
    params.config_file = args_dict['config_file']
    params.__post_init__()

    params.gpu_num = args_dict['gpu_num']
    # Set the device
    dev_num = params.gpu_num
    os.environ["CUDA_VISIBLE_DEVICES"] = dev_num
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    device = torch.device(f"cuda" if torch.cuda.is_available() else "cpu")
    logging.info(f"Using {device} as computation device")

    if device == f"cuda":
        torch.cuda.set_device()
    logging.info(f"Using {device} as computation device")
    params.device = device
Code example #14
def run(params):
    train(params)
    print('loading the pretraining model...')
    model = torch.load('dummy_files/best_model.pt')
    generate_visual_rep(model, params)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description='running experiments on multimodal datasets.')
    parser.add_argument('-config',
                        action='store',
                        dest='config_file',
                        help='please enter configuration file.',
                        default='config/pretrain_visual.ini')
    args = parser.parse_args()
    params = Params()
    params.parse_config(args.config_file)
    params.config_file = args.config_file
    mode = 'run'
    if 'mode' in params.__dict__:
        mode = params.mode
    set_seed(params)
    params.device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    if mode == 'extract_raw_feature':
        extract_raw_features(params)
    elif mode == 'run':
        run(params)
Code example #15
import os
import cv2
import numpy as np
import tensorflow as tf
from tensorflow.keras.losses import categorical_crossentropy
from tensorflow.keras.optimizers import Adam, SGD
from tensorflow.keras.models import load_model
from models.networks import get_models_I3D_LSTM
from tensorflow.keras.callbacks import ModelCheckpoint
from utils.pre_process import pre_process_sequence
from utils.params import Params
from utils.videoto3D import Videoto3D
from utils.sequence import UCF101_Sequence

model = get_models_I3D_LSTM()
prs = Params()

X_train, X_test, y_train, y_test = pre_process_sequence()

loader = Videoto3D(n_frames=40, mode='rgb')

sequenceTrain = UCF101_Sequence(X_train,
                                y_train,
                                loader=loader,
                                folder_image=prs.path_img,
                                batch_size=prs.batch_size,
                                image_size=prs.image_size)
sequenceTest = UCF101_Sequence(X_test,
                               y_test,
                               loader=loader,
                               folder_image=prs.path_img,
                               batch_size=prs.batch_size,
                               image_size=prs.image_size)
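
A hedged sketch of how these Sequence objects would typically be consumed; the compile settings, epoch count and checkpoint path below are placeholder assumptions, not taken from the original script:

# Hypothetical continuation: all hyperparameters here are made up.
model.compile(optimizer=Adam(learning_rate=1e-4),
              loss=categorical_crossentropy,
              metrics=['accuracy'])
model.fit(sequenceTrain,
          validation_data=sequenceTest,
          epochs=10,
          callbacks=[ModelCheckpoint('i3d_lstm_best.h5', save_best_only=True)])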
Code example #16
File: from_params.py  Project: shtechair/ACE
    def from_params(cls: Type[T], params: Params, **extras) -> T:
        """
        This is the automatic implementation of `from_params`. Any class that subclasses `FromParams`
        (or `Registrable`, which itself subclasses `FromParams`) gets this implementation for free.
        If you want your class to be instantiated from params in the "obvious" way -- pop off parameters
        and hand them to your constructor with the same names -- this provides that functionality.

        If you need more complex logic in your `from_params` method, you'll have to implement
        your own method that overrides this one.
        """
        # pylint: disable=protected-access
        from stog.utils.registrable import Registrable  # import here to avoid circular imports

        logger.info(
            f"instantiating class {cls} from params {getattr(params, 'params', params)} "
            f"and extras {extras}")

        if params is None:
            return None

        registered_subclasses = Registrable._registry.get(cls)

        if registered_subclasses is not None:
            # We know ``cls`` inherits from Registrable, so we'll use a cast to make mypy happy.
            # We have to use a disable to make pylint happy.
            # pylint: disable=no-member
            as_registrable = cast(Type[Registrable], cls)
            default_to_first_choice = as_registrable.default_implementation is not None
            choice = params.pop_choice(
                "type",
                choices=as_registrable.list_available(),
                default_to_first_choice=default_to_first_choice)
            subclass = registered_subclasses[choice]

            # We want to call subclass.from_params. It's possible that it's just the "free"
            # implementation here, in which case it accepts `**extras` and we are not able
            # to make any assumptions about what extra parameters it needs.
            #
            # It's also possible that it has a custom `from_params` method. In that case it
            # won't accept any **extra parameters and we'll need to filter them out.
            if not takes_arg(subclass.from_params, 'extras'):
                # Necessarily subclass.from_params is a custom implementation, so we need to
                # pass it only the args it's expecting.
                extras = {
                    k: v
                    for k, v in extras.items()
                    if takes_arg(subclass.from_params, k)
                }

            return subclass.from_params(params=params, **extras)
        else:
            # This is not a base class, so convert our params and extras into a dict of kwargs.

            if cls.__init__ == object.__init__:
                # This class does not have an explicit constructor, so don't give it any kwargs.
                # Without this logic, create_kwargs will look at object.__init__ and see that
                # it takes *args and **kwargs and look for those.
                kwargs: Dict[str, Any] = {}
            else:
                # This class has a constructor, so create kwargs for it.
                kwargs = create_kwargs(cls, params, **extras)

            return cls(**kwargs)  # type: ignore
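
To make the dispatch concrete, here is a toy, self-contained re-implementation of the registrable pattern; the names are invented for illustration, and the real Registrable/Params classes carry far more machinery:

# Toy version of "pop 'type', look up the subclass, build it".
registry = {}

class Encoder:
    @classmethod
    def register(cls, name):
        def wrap(subclass):
            registry[name] = subclass
            return subclass
        return wrap

    @classmethod
    def from_params(cls, params):
        subclass = registry[params.pop("type")]
        return subclass(**params)  # remaining keys become constructor kwargs

@Encoder.register("lstm")
class LstmEncoder(Encoder):
    def __init__(self, hidden_size):
        self.hidden_size = hidden_size

enc = Encoder.from_params({"type": "lstm", "hidden_size": 128})
assert isinstance(enc, LstmEncoder) and enc.hidden_size == 128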
Code example #17
                                             params.align_function)
        output_dir = os.path.join(params.datasets_dir, align_output_dir)
    dataset.deploy(output_dir, deploy_files)


if __name__ == '__main__':

    parser = argparse.ArgumentParser(
        description='running experiments on multimodal datasets.')
    parser.add_argument('-config',
                        action='store',
                        dest='config_file',
                        help='please enter configuration file.',
                        default='config/extract.ini')
    args = parser.parse_args()
    params = Params()
    params.parse_config(args.config_file)
    params.config_file = args.config_file
    mode = 'extract'
    if 'mode' in params.__dict__:
        mode = params.mode

    if mode == 'download':
        download(params)

    elif mode == 'align':
        align(params)

    elif mode == 'extract':
        reader = dataset.setup(params)
        reader.read_data_from_sdk()
Code example #18
def main(dataset_name,
         experiment_name,
         train_model=False,
         evaluate_model=False,
         compute_dset_mean_std=False,
         compute_dset_gt_bounds=False):
    """
    Args:
    dataset_name: str - name of the dataset -> from constants.py
    experiment_name: str - name of the experiment folder
    train_model: bool - begin training if true
    evaluate_model: bool - run validation if true
    compute_dset_mean_std: bool - compute mean and std of the dataset
    compute_dset_gt_bounds: bool - compute coordinates of an encompassing box of the labelled area
    of the training set
    """
    config = Params(constants.config_path.format(dataset_name,
                                                 experiment_name))
    params = Params(constants.params_path.format(dataset_name,
                                                 experiment_name))
    stats = Params(constants.stats_path.format(dataset_name, experiment_name))
    validate_params(params)
    prints.show_training_info(params)

    training_dataloader, validation_dataloader = training_setup.prepare_dataloaders(
        params, config)
    prints.print_dataset_stats(training_dataloader, validation_dataloader)

    model = training_setup.model_setup(params, config)
    optimizer = training_setup.optimizer_setup(model, params)
    start_epoch = 0
    if hasattr(params, "load_type"):
        start_epoch = training_setup.load_model(model, optimizer, params,
                                                dataset_name, experiment_name)
    else:
        if amp_available and general_config.use_amp:
            model, optimizer = amp.initialize(
                model, optimizer, opt_level=general_config.amp_opt_level)
    prints.print_trained_parameters_count(model, optimizer)

    experiment_info = prints.create_tensorboard_name(dataset_name,
                                                     experiment_name, params)

    if config.model_id in constants.segmentor_ids:
        model_trainer = train.Segmentation_Trainer(
            model=model,
            training_dataloader=training_dataloader,
            validation_dataloader=validation_dataloader,
            optimizer=optimizer,
            params=params,
            config=config,
            stats=stats,
            start_epoch=start_epoch,
            dataset_name=dataset_name,
            experiment_info=experiment_info,
            experiment_name=experiment_name)
    elif config.model_id in constants.detectors:
        model_trainer = train.Detector_Trainer(
            model=model,
            training_dataloader=training_dataloader,
            validation_dataloader=validation_dataloader,
            optimizer=optimizer,
            params=params,
            config=config,
            stats=stats,
            start_epoch=start_epoch,
            dataset_name=dataset_name,
            experiment_info=experiment_info,
            experiment_name=experiment_name)
    if train_model:
        model_trainer.train()

    if evaluate_model:
        model_trainer.evaluate(start_epoch, no_saving=True)

    if compute_dset_mean_std:
        print("Computing dataset mean and std!")
        images = training_dataloader.dataset.get_images()
        mean, std = data_normalization.per_dataset_norm(images)
        print("Dataset mean and standard deviation: ", mean, std)

    if compute_dset_gt_bounds:
        roi_crop.get_dataset_gt_bounds(dataset_name, params, config)
Code example #19
    def __init__(self, device, params=None, template_mesh=None):
        super().__init__()

        # Avoid a shared mutable default argument: build Params() per instance.
        if params is None:
            params = Params()
        self.device = device
        self.params = params
        self.mesh_scale = params.mesh_sphere_scale
        self.ico_level = params.mesh_sphere_level
        self.is_real_data = params.is_real_data
        self.init_pose_R = None
        self.init_pose_t = None

        # Create a source mesh
        if not template_mesh:
            template_mesh = ico_sphere(params.mesh_sphere_level, device)
            template_mesh.scale_verts_(params.mesh_sphere_scale)

        # For EVIMO data, we need to apply a delta Transform to map poses from the
        # EVIMO coordinate system to the PyTorch3D system. Since we don't know the
        # initial transform, we optimize the initial pose as a parameter while
        # rendering the mesh. Initialize the delta Transform:
        if params.is_real_data:
            init_trans = Transform3d(device=device)
            R_init = init_trans.get_matrix()[:, :3, :3]
            qua_init = matrix_to_quaternion(R_init)
            random_noise = (torch.randn(qua_init.shape) /
                            params.mesh_pose_init_noise_var).to(self.device)
            qua_init += random_noise

            t_init = init_trans.get_matrix()[:, 3:, :3]
            random_noise_t = (torch.randn(t_init.shape) /
                              params.mesh_pose_init_noise_var).to(self.device)
            t_init += random_noise_t

            self.register_parameter('init_camera_R',
                                    nn.Parameter(qua_init).to(self.device))
            self.register_parameter('init_camera_t',
                                    nn.Parameter(t_init).to(self.device))

        verts, faces = template_mesh.get_mesh_verts_faces(0)
        # Initialize each vert to have no texture
        verts_rgb = torch.ones_like(verts)[None]
        textures = TexturesVertex(verts_rgb.to(self.device))
        self.template_mesh = Meshes(
            verts=[verts.to(self.device)],
            faces=[faces.to(self.device)],
            textures=textures,
        )

        self.register_buffer("vertices", self.template_mesh.verts_padded())
        self.register_buffer("faces", self.template_mesh.faces_padded())
        self.register_buffer("textures", textures.verts_features_padded())

        deform_verts = torch.zeros_like(self.template_mesh.verts_packed(),
                                        device=device,
                                        requires_grad=True)
        # Create an optimizable parameter for the mesh
        self.register_parameter("deform_verts",
                                nn.Parameter(deform_verts).to(self.device))

        # Create optimizer
        self.optimizer = self.params.mesh_optimizer(
            self.parameters(),
            lr=self.params.mesh_learning_rate,
            betas=self.params.mesh_betas)

        self.losses = {"iou": [], "laplacian": [], "flatten": []}

        # Create a silhouette_renderer
        self.renderer = silhouette_renderer(self.params.img_size, device)
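
The core trick above is registering deform_verts as an optimizable parameter and adding it to the template vertices on every step. A minimal, self-contained analogue in plain PyTorch (random data; PyTorch3D's mesh types and renderer replaced by raw tensors):

# Deform-verts sketch: optimize per-vertex offsets toward a made-up target.
import torch
import torch.nn as nn

template_verts = torch.rand(100, 3)   # stand-in for the ico-sphere vertices
target_verts = template_verts * 1.5   # made-up target shape

deform_verts = nn.Parameter(torch.zeros_like(template_verts))
optimizer = torch.optim.Adam([deform_verts], lr=0.05)

for step in range(100):
    optimizer.zero_grad()
    deformed = template_verts + deform_verts  # analogue of offset_verts
    loss = ((deformed - target_verts) ** 2).mean()
    loss.backward()
    optimizer.step()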
Code example #20
File: model.py  Project: gkoumasd/MSAF
def print_result_from_dir(dir_path):
    params = Params()
    params.parse_config(os.path.join(dir_path, 'config.ini'))
    with open(os.path.join(dir_path, 'eval'), 'r') as reader:
        s = reader.readline().split()
    print('dataset: {}, network_type: {}, acc: {}, f1: {}'.format(
        params.dataset_name, params.network_type, s[2], s[5]))
Code example #21
File: from_params.py  Project: shtechair/ACE
def create_kwargs(cls: Type[T], params: Params, **extras) -> Dict[str, Any]:
    """
    Given some class, a `Params` object, and potentially other keyword arguments,
    create a dict of keyword args suitable for passing to the class's constructor.

    The function does this by finding the class's constructor, matching the constructor
    arguments to entries in the `params` object, and instantiating values for the parameters
    using the type annotation and possibly a from_params method.

    Any values that are provided in the `extras` will just be used as is.
    For instance, you might provide an existing `Vocabulary` this way.
    """
    # Get the signature of the constructor.
    signature = inspect.signature(cls.__init__)
    kwargs: Dict[str, Any] = {}

    # Iterate over all the constructor parameters and their annotations.
    for name, param in signature.parameters.items():
        # Skip "self". You're not *required* to call the first parameter "self",
        # so in theory this logic is fragile, but if you don't call the self parameter
        # "self" you kind of deserve what happens.
        if name == "self":
            continue

        # If the annotation is a compound type like typing.Dict[str, int],
        # it will have an __origin__ field indicating `typing.Dict`
        # and an __args__ field indicating `(str, int)`. We capture both.
        annotation = remove_optional(param.annotation)
        origin = getattr(annotation, '__origin__', None)
        args = getattr(annotation, '__args__', [])

        # The parameter is optional if its default value is not the "no default" sentinel.
        default = param.default
        optional = default != _NO_DEFAULT

        # Some constructors expect extra non-parameter items, e.g. vocab: Vocabulary.
        # We check the provided `extras` for these and just use them if they exist.
        if name in extras:
            kwargs[name] = extras[name]

        # The next case is when the parameter type is itself constructible from_params.
        elif hasattr(annotation, 'from_params'):
            if name in params:
                # Our params have an entry for this, so we use that.
                subparams = params.pop(name)

                if takes_arg(annotation.from_params, 'extras'):
                    # If annotation.from_params accepts **extras, we need to pass them all along.
                    # For example, `BasicTextFieldEmbedder.from_params` requires a Vocabulary
                    # object, but `TextFieldEmbedder.from_params` does not.
                    subextras = extras
                else:
                    # Otherwise, only supply the ones that are actual args; any additional ones
                    # will cause a TypeError.
                    subextras = {
                        k: v
                        for k, v in extras.items()
                        if takes_arg(annotation.from_params, k)
                    }

                # In some cases we allow a string instead of a param dict, so
                # we need to handle that case separately.
                if isinstance(subparams, str):
                    kwargs[name] = annotation.by_name(subparams)()
                else:
                    kwargs[name] = annotation.from_params(params=subparams,
                                                          **subextras)
            elif not optional:
                # Not optional and not supplied, that's an error!
                raise ConfigurationError(
                    f"expected key {name} for {cls.__name__}")
            else:
                kwargs[name] = default

        # If the parameter type is a Python primitive, just pop it off
        # using the correct casting pop_xyz operation.
        elif annotation == str:
            kwargs[name] = (params.pop(name, default)
                            if optional else params.pop(name))
        elif annotation == int:
            kwargs[name] = (params.pop_int(name, default)
                            if optional else params.pop_int(name))
        elif annotation == bool:
            kwargs[name] = (params.pop_bool(name, default)
                            if optional else params.pop_bool(name))
        elif annotation == float:
            kwargs[name] = (params.pop_float(name, default)
                            if optional else params.pop_float(name))

        # This is special logic for handling types like Dict[str, TokenIndexer], which it creates by
        # instantiating each value from_params and returning the resulting dict.
        elif origin in (Dict, dict) and len(args) == 2 and hasattr(
                args[-1], 'from_params'):
            value_cls = annotation.__args__[-1]

            value_dict = {}

            for key, value_params in params.pop(name, Params({})).items():
                value_dict[key] = value_cls.from_params(params=value_params,
                                                        **extras)

            kwargs[name] = value_dict

        else:
            # Pass it on as is and hope for the best.   ¯\_(ツ)_/¯
            if optional:
                kwargs[name] = params.pop(name, default)
            else:
                kwargs[name] = params.pop(name)

    params.assert_empty(cls.__name__)
    return kwargs
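
A stripped-down, self-contained illustration of the signature-matching idea; it handles only plain dicts and defaults, none of the annotation-driven recursion above:

import inspect

# Match a plain dict of params against a constructor signature, falling
# back to defaults for missing optional arguments.
def simple_create_kwargs(cls, params):
    kwargs = {}
    for name, param in inspect.signature(cls.__init__).parameters.items():
        if name == "self":
            continue
        if name in params:
            kwargs[name] = params.pop(name)
        elif param.default is not inspect.Parameter.empty:
            kwargs[name] = param.default
        else:
            raise KeyError(f"expected key {name} for {cls.__name__}")
    return kwargs

class Model:
    def __init__(self, hidden_size, dropout=0.1):
        self.hidden_size, self.dropout = hidden_size, dropout

m = Model(**simple_create_kwargs(Model, {"hidden_size": 64}))
assert m.hidden_size == 64 and m.dropout == 0.1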
Code example #22
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "val_json",
        type=str,
        help="Directory of validation json file which indicates the best epoch.")
    parser.add_argument("eval_iter",
                        type=int,
                        nargs="?",
                        default=5,
                        help="Number of times to train and evaluate model")
    args = parser.parse_args()

    with open(args.val_json) as f:
        model_params = json.load(f)

    params = Params("hparams.yaml", model_params["model"])
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = params.gpu_vis_dev
    use_gpu = torch.cuda.is_available()
    device = torch.device("cuda" if use_gpu else "cpu")

    log_dir = os.path.join(params.log_dir, "eval_logs")
    os.makedirs(log_dir, exist_ok=True)
    model_module = __import__('.'.join(['models', params.model_name]),
                              fromlist=['object'])
    Dataset = getattr(Datasets, params.dataset_class)

    roc_auc_scores = []
    for iter_i in range(args.eval_iter):
        print("Training model for iteration {}...".format(iter_i))
        model = model_module.net().to(device)
        train = model_module.train
        test = model_module.test

        optimizer = optim.Adam(model.parameters(), lr=model_params['lr'])

        augments_train = getattr(model_augments, params.augments_train)()
        train_data = Dataset(params.data_dir + "/train",
                             augmentations=augments_train)

        train_loader = DataLoader(train_data,
                                  batch_size=params.batch_size,
                                  shuffle=True)
        os.makedirs(params.checkpoint_dir, exist_ok=True)
        for epoch in range(1, model_params["best_val_epoch"] + 1):
            train(model, device, train_loader, optimizer)
        # Just save the last epoch of each iteration.
        torch.save(
            model.state_dict(),
            os.path.join(
                params.checkpoint_dir, "checkpoint_{}_epoch_{}_iter_{}".format(
                    model_params["model"], epoch, iter_i)))
        print("Evaluating model for iteration {}...".format(iter_i))

        augments_val = getattr(model_augments, params.augments_val)()

        test_data = Dataset(params.data_dir + "/test",
                            augmentations=augments_val)
        test_loader = DataLoader(test_data,
                                 batch_size=params.batch_size,
                                 shuffle=False)

        roc_auc_score = test(model, device, test_loader)
        print("ROC AUC for iteration {}\t {}".format(iter_i, roc_auc_score))

        roc_auc_scores.append(float(roc_auc_score))
    logs = {
        "model": model_params["model"],
        "num_epochs": model_params["best_val_epoch"],
        "lr": model_params['lr'],
        "batch_size": model_params["batch_size"],
        "eval_iterations": args.eval_iter,
        "roc_auc_scores": roc_auc_scores,
        "mean_roc_auc": float(np.mean(roc_auc_scores)),
        "var_roc_auc": float(np.var(roc_auc_scores)),
    }

    with open(
            os.path.join(
                log_dir, "{}_{}.json".format(model_params["model"],
                                             args.eval_iter)), 'w') as f:
        json.dump(logs, f)