예제 #1
0
def main():
    """Entry point: build data loaders, model, and trainer from config, then train.

    Reads the config file named on the command line, creates the experiment
    output directories, resolves the data-loader / model / trainer classes by
    dotted name via ``factory``, and runs training with a shuffled training
    generator and a non-shuffled eval generator.
    """
    args = get_args()
    config = process_config(args.config)

    # create the experiment dirs (tensorboard logs + checkpoints)
    create_dirs([
        config["callbacks"]["tensorboard_log_dir"],
        config["callbacks"]["checkpoint_dir"],
    ])

    # resolve classes by dotted name from the config
    data_loader_cls = factory("data_loaders.{}".format(
        config["data_loader"]["name"]))
    model_cls = factory("models.{}".format(config["model"]["name"]))
    trainer_cls = factory("trainers.{}".format(config["trainer"]["name"]))

    # create data generators for the data sets; only training shuffles
    loader_params = config["data_loader"]
    training_generator = data_loader_cls("train",
                                         shuffle=True,
                                         **loader_params)
    eval_generator = data_loader_cls("eval", **loader_params)

    # build and train the model
    # (fix: removed a leftover debug print of training_generator.feature_dim)
    model_params = config['model']
    model = model_cls(training_generator.feature_dim,
                      training_generator.n_classes, **model_params)
    trainer = trainer_cls(config["exp"]["name"], model.model,
                          config["callbacks"], **config["trainer"])
    trainer.train(training_generator, eval_generator)
예제 #2
0
def main():
    """Entry point: build loaders, model(s), and trainer from config, then dispatch on mode.

    Supports plain single-model training as well as tri-training (three
    models configured under ``model1`` .. ``model3``), plus 'eval' and
    'predict' modes selected by ``args.mode``.
    """
    args = parse_args()
    config = process_config(args.config)

    # make sure the experiment output dirs exist
    cb_cfg = config["callbacks"]
    create_dirs([cb_cfg["tensorboard_log_dir"], cb_cfg["checkpoint_dir"]])

    # resolve loader/trainer classes by dotted name from the config
    data_loader_cls = factory("data_loaders.{}".format(config["data_loader"]["name"]))
    trainer_cls = factory("trainers.{}".format(config["trainer"]["name"]))

    # one generator per data split
    loader_params = config["data_loader"]
    generators = {}
    for split in ('train', 'eval', 'unlabeled'):
        generators[split] = data_loader_cls(split, **loader_params)

    feature_dim = generators['train'].feature_dim
    n_classes = generators['train'].n_classes

    if args.tri_training:
        # tri-training: three independently configured models
        model = []
        for idx in (1, 2, 3):
            print('Creating model {}/3'.format(idx))
            section = config['model{}'.format(idx)]
            cls = factory("models.{}".format(section["name"]))
            model.append(cls(feature_dim, n_classes, **section))
    else:
        section = config['model']
        cls = factory("models.{}".format(section["name"]))
        model = cls(feature_dim, n_classes, **section)

    trainer = trainer_cls(
        config["exp"]["name"],
        model,
        cb_cfg,
        **config["trainer"]
    )

    if args.mode == 'train':
        # (generators['unlabeled'] and a confidence threshold could also be
        # passed here for semi-supervised training)
        trainer.train(generators['train'], generators['eval'])
    elif args.mode == 'eval':
        evaluator = DCASE2019T4Evaluator(
            trainer.predict,
            generators['eval'],
            verbose=True
        )
        ret = evaluator.evaluate(evaluator.find_class_thresholds())
    else:
        # mode == 'predict'
        for data_type, fns in config['trainer']['prediction'].items():
            print('Predicting on {} data and storing at {}.'.format(data_type, fns['predictions_fn']))
            trainer.predict(
                generators[data_type],
                write_fn=path.join('experiments', config['exp']['name'], fns['predictions_fn'])
            )
예제 #3
0
    # NOTE(review): this span is the body of an enclosing function whose
    # signature is outside this view; `args` and the imported names
    # (Dictionary, VQAEDataset, utils, torch, os, DataLoader, evaluate)
    # are defined elsewhere in the file.

    # Load question/explanation vocabularies (VQA-E variants; the commented
    # lines are the older non-VQAE paths kept for reference).
    #q_dict = Dictionary.load_from_file('data/question_dictionary.pkl')
    #c_dict = Dictionary.load_from_file('data/caption_dictionary.pkl')
    q_dict = Dictionary.load_from_file('data/VQAE/question_dictionary.pkl')
    c_dict = Dictionary.load_from_file('data/VQAE/explain_dictionary.pkl')

    # Build train/val datasets from the VQA-E cache directory.
    #train_dset = VQAFeatureDataset('train', q_dict, c_dict, args.att_thr)
    #eval_dset = VQAFeatureDataset('val', q_dict, c_dict, args.att_thr)
    train_dset = VQAEDataset('train', q_dict, c_dict, 'cache/VQAE2')
    eval_dset = VQAEDataset('val', q_dict, c_dict, 'cache/VQAE2')
    #train_dset = VQAEVQA2Dataset('train', q_dict, c_dict, 'cache')
    #eval_dset = VQAEVQA2Dataset('val', q_dict, c_dict, 'cache')
    batch_size = args.batch_size

    # Instantiate the model through a 'build_<name>' constructor and move it
    # to the GPU.
    constructor = 'build_%s' % args.model
    model = utils.factory(constructor, train_dset, args.num_hid, args.att_dim,
                          args.decode_dim).cuda()

    # Restore trained weights from <args.output>/model.pth.
    model_path = os.path.join(args.output, 'model.pth')
    model_state = torch.load(model_path)
    model.load_state_dict(model_state)

    print('Model has {} parameters in total'.format(utils.params_count(model)))
    #model = nn.DataParallel(model).cuda()

    # Evaluate on the validation split with the model in inference mode.
    eval_loader = DataLoader(eval_dset,
                             batch_size,
                             shuffle=False,
                             num_workers=1)
    model.train(False)  # eval mode: disables dropout / batch-norm updates
    vqa_score, results = evaluate(model, eval_loader, q_dict, c_dict)
    save_obj = {'vqa_score': vqa_score, 'results': results}
예제 #4
0

def trainval():
    """Stub: the train/val network builder is not implemented yet."""


def deploy():
    """Stub: the deploy network builder is not implemented yet."""


def net():
    """Stub: the network definition is not implemented yet."""


if __name__ == '__main__':
    # Build a U-Net prototxt: an image data layer, an upsampling layer, and
    # a mask data layer.
    # NOTE(review): this snippet is truncated — the mask n.Data(...) call at
    # the bottom is cut off mid-argument-list, and `factory`/`cfgs` come from
    # outside this view.
    n = factory('unet')
    h = cfgs.inShape[0]  # input height from the config module
    w = cfgs.inShape[1]  # input width from the config module
    batch_size = 32
    n_filter = 32  # not used within the visible span
    # raw ultrasound images, resized to (h, w) and intensity-scaled
    n.Data("/home/yihuihe/Ultrasound-Nerve-Segmentation/data/train.txt",
           backend='image',
           label="aaa",
           new_height=h,
           new_width=w,
           scale=0.01578412369702059,
           batch_size=batch_size)

    n.Deconvolution('up1', )

    # ground-truth segmentation masks (argument list continues past this view)
    n.Data("data/mask.txt",
# by yihui
import caffe
from utils import factory
import cfgs_res as cfgs

def trainval():
    """Stub: train/val network construction — intentionally empty."""

def deploy():
    """Stub: deploy network construction — intentionally empty."""

def net():
    """Stub: network definition — intentionally empty."""

if __name__ == '__main__':
    # Assemble the U-Net training prototxt and write it to disk.
    n = factory('unet')
    h = cfgs.inShape[0]
    w = cfgs.inShape[1]
    batch_size = 32
    n_filter = 32

    # image data layer: raw ultrasound frames, resized and intensity-scaled
    n.Data("/home/yihuihe/Ultrasound-Nerve-Segmentation/data/train.txt",
           backend='image',
           label="aaa",
           new_height=h,
           new_width=w,
           scale=0.01578412369702059,
           batch_size=batch_size)

    n.Deconvolution('up1')

    # mask data layer: ground-truth masks, mean-subtracted and scaled to [0, 1]
    n.Data("data/mask.txt",
           name='label',
           mean_file="data/mask_mean.binaryproto",
           backend='image',
           new_height=h,
           new_width=w,
           batch_size=batch_size,
           scale=1. / 255)
    n.silence('nothing', 'aaa')
    n.diceLoss('prob', 'label')

    # serialize the assembled net definition
    n.totxt('resnet/addition.prototxt')
예제 #6
0

def trainval():
    """Placeholder for the train/val builder; does nothing."""


def deploy():
    """Placeholder for the deploy builder; does nothing."""


def net():
    """Placeholder for the net builder; does nothing."""


if __name__ == "__main__":
    n = factory("unet")
    h = cfgs.inShape[0]
    w = cfgs.inShape[1]
    batch_size = 24
    omit = 0
    layers = 5
    if use_lmdb:
        n.Data(
            "/mnt/data1/yihuihe/ultrasound-nerve/lmdb_train_val/train_data",  # mean_file='data/data_mean.binaryproto',
            mean_value=[128],
            scale=1.0 / 255,
            backend="LMDB",
        )
    else:
        n.Data(
            "data/val_mask.txt",