Example #1
    def __init__(self, config_path, gpu, model_path):
        # GPU: use it only when a device id string is given
        self.use_gpu = gpu is not None
        if self.use_gpu:
            os.environ["CUDA_VISIBLE_DEVICES"] = gpu

        # config
        self.config = ConfigParser(config_path)

        # formatter
        useable_list = {
            # "AYYC": AYPredictionFormatter
            "DeepFM": DeepFMFormatter,
            "LRMM": MusicPairFormatter,
            "DeepInterest": DeepInterestFormatter,
            "NCF": NCFFormatter
        }
        formatter_name = self.config.get("data", "formatter")
        if formatter_name in useable_list:
            self.formatter = useable_list[formatter_name](self.config)
        else:
            raise NotImplementedError("formatter %s is not supported" % formatter_name)
        task_loss_type = self.config.get("train", "type_of_loss")
        self.criterion = get_loss(task_loss_type)

        # model
        model_name = self.config.get("model", "name")
        net = get_model(model_name, self.config)
        use_cuda = torch.cuda.is_available() and self.use_gpu
        if use_cuda:
            net = net.cuda()
        # map_location lets a checkpoint saved on GPU load on a CPU-only machine
        net.load_state_dict(torch.load(model_path, map_location=None if use_cuda else "cpu"))
        self.net = net
        print_info("Net build done")
Example #2
if configFilePath is None:
    print("python *.py\t--config/-c\tconfigfile")
    raise SystemExit(1)  # a config file is required, so stop here
# use the GPU only when one is requested on the command line
use_gpu = args.gpu is not None
if use_gpu:
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

config = ConfigParser(configFilePath)

print_info("Start to build Net")

model_name = config.get("model", "name")
net = get_model(model_name, config)
print_info(model_name)

device = []
print_info("CUDA:%s" % str(torch.cuda.is_available()))
if torch.cuda.is_available() and use_gpu:
    # CUDA_VISIBLE_DEVICES remaps the selected GPUs to logical ids 0..n-1
    device_list = args.gpu.split(",")
    device = list(range(len(device_list)))

    print_info('begin .cuda()')
    net = net.cuda()
    print_info('cuda() successfully')

    try:
        net.init_multi_gpu(device)
    except Exception as e:
        print_info('init_multi_gpu failed: %s' % str(e))
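
The net.init_multi_gpu(device) call above is a project-specific helper; a minimal sketch of the same idea with PyTorch's built-in nn.DataParallel, assuming a placeholder model in place of the net returned by get_model:

import torch
import torch.nn as nn

net = nn.Linear(16, 4)  # placeholder model standing in for get_model()'s net

if torch.cuda.is_available():
    # after CUDA_VISIBLE_DEVICES is set, the visible GPUs are numbered 0..n-1
    device_ids = list(range(torch.cuda.device_count()))
    net = net.cuda()
    if len(device_ids) > 1:
        # DataParallel replicates the module and splits each input batch across the GPUs
        net = nn.DataParallel(net, device_ids=device_ids)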
Example #3
# GPU settings
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True

if __name__ == '__main__':

    # Get dataset (train and validation)
    dataset = get_dataset(dataset_name=args.task,
                          data_root=args.dataroot,
                          mode='train')

    # Get corresponding model for task
    model = get_model(args, dataset['train'], pretrained=False)

    # if GPUs are available, move model to cuda
    if model.cuda_available:
        model = model.cuda()

    # try to load model (if any)
    if args.load_directory:
        model.load(log_directory=args.load_directory,
                   snapshot_num=args.snapshot_num)

    # initialise model trainer and train
    trainer = Trainer(args)
    if args.task == 'captioning':
        trainer.train_captioning(args, model, dataset)
    elif args.task == 'vqa':
        trainer.train_vqa(args, model, dataset)
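
The seed calls at the top of Example #3 cover PyTorch only; a minimal sketch of a broader seeding helper (the function name seed_everything is hypothetical, and turning on deterministic cuDNN kernels trades speed for reproducibility):

import random

import numpy as np
import torch


def seed_everything(seed, deterministic=False):
    """Hypothetical helper: seed Python, NumPy and PyTorch RNGs."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # seeds every visible GPU
    if deterministic:
        # deterministic kernels are slower but reproducible run to run
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False


seed_everything(42)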
Example #4
    distributed = num_gpus > 1
    if args.cuda and torch.cuda.is_available():
        cudnn.benchmark = True
        device = torch.device('cuda')
    else:
        # fall back to CPU so the later net.to(device) call still works
        device = torch.device('cpu')
        distributed = False

    if distributed:
        torch.cuda.set_device(args.local_rank)
        torch.distributed.init_process_group(backend="nccl", init_method=args.init_method)

    # Load Model
    model_name = args.model
    kwargs = {'classes': 1000, 'pretrained': args.pretrained, }

    net = get_model(model_name, **kwargs)
    net.to(device)

    # testing data
    acc_top1 = Accuracy()
    acc_top5 = TopKAccuracy(5)
    val_data = get_dataloader(args, distributed)

    # testing
    acc_top1, acc_top5 = validate(net, val_data, device, acc_top1, acc_top5)
    synchronize()
    name1, top1 = accumulate_metric(acc_top1)
    name5, top5 = accumulate_metric(acc_top5)
    if is_main_process():
        print('%s: %f, %s: %f' % (name1, top1, name5, top5))
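
Example #4 only evaluates, so it never wraps the network; a minimal sketch of how the same process-group setup is typically paired with DistributedDataParallel when training, with a placeholder model standing in for the snippet's net:

import os

import torch
import torch.distributed
import torch.nn as nn
from torch.nn.parallel import DistributedDataParallel as DDP

net = nn.Linear(16, 4)  # placeholder model standing in for get_model()'s net

if torch.distributed.is_available() and torch.distributed.is_initialized():
    local_rank = int(os.environ.get("LOCAL_RANK", "0"))  # plays the role of args.local_rank
    torch.cuda.set_device(local_rank)
    net = net.cuda(local_rank)
    # DDP averages gradients across processes during backward()
    net = DDP(net, device_ids=[local_rank])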
Example #5
args.save_directory = os.path.join(args.save_directory, args.name)

# GPU settings
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
torch.backends.cudnn.benchmark = True

if __name__ == '__main__':

    # Get dataset (testing)
    dataset = get_dataset(dataset_name=args.task, data_root=args.dataroot, mode='test')

    # Get corresponding model for task
    model = get_model(args, dataset['val'], pretrained=True)

    # if GPUs are available, move model to cuda
    if model.cuda_available:
        model = model.cuda()

    # try to load model (if any)
    if args.load_directory:
        model.load(log_directory=args.load_directory, snapshot_num=args.snapshot_num)

    # initialise the evaluator and run sampling for the chosen task
    evaluator = Evaluator(args)
    if args.task.lower() == 'captioning':
        evaluator.sample_captioning(model, dataset['test'])
    elif args.task.lower() == 'vqa':
        evaluator.sample_vqa(model, dataset['val'])
    to_save = config.PRED_SAVE_DIR + '{}_val.csv'.format(savename)
    print(f'Saving predictions to {to_save}')
    result.to_csv(to_save)


if __name__ == "__main__":
    # print({item:getattr(config, item) for item in dir(config) if not item.startswith("__")})
    print("Loading data....")
    data, labid_to_id, padding_idx = data_reader.load_data(
        config.DATA_DIR + 'data.csv', train=False, val=True)
    target_size = len(labid_to_id)

    print("Building model...")
    MODEL = get_model(config.MODEL)
    model = MODEL(target_size,
                  rv_comp=config.USE_RC,
                  metadata=config.METADATA,
                  padding_idx=padding_idx)
    model.to(config.DEVICE)

    if config.MODEL_NAME_SUFFIX != '':
        savename = config.MODEL + '_' + config.MODEL_NAME_SUFFIX
    else:
        savename = config.MODEL
    get_predictions(data,
                    model,
                    labid_to_id,
                    n_folds=config.N_FOLDS,
                    savename=savename)
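
get_predictions above is project code that is not shown; a minimal sketch of the inference pattern such a helper typically relies on, with placeholder names for the model, loader and output column:

import pandas as pd
import torch


def predict_to_frame(model, loader, device):
    """Hypothetical helper: run a model over a DataLoader and collect class predictions."""
    model.eval()  # disable dropout and use running batch-norm statistics
    preds = []
    with torch.no_grad():  # no gradient bookkeeping during inference
        for inputs, _ in loader:
            logits = model(inputs.to(device))
            preds.extend(logits.argmax(dim=1).cpu().tolist())
    return pd.DataFrame({"prediction": preds})


# result = predict_to_frame(model, val_loader, config.DEVICE)
# result.to_csv(config.PRED_SAVE_DIR + 'example_val.csv')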