# --- Example 1 ---
def run(args):
    """Train and/or evaluate a model described by a YAML config, with
    optional distributed training.

    :param args: parsed command-line namespace; expected attributes include
        world_size, dist_url, config, init, evaluate, epoch and lr.
    """
    distributed, device_ids = main_util.init_distributed_mode(
        args.world_size, args.dist_url)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # cudnn autotuning pays off when batch input sizes stay constant.
    if device.type == 'cuda':
        cudnn.benchmark = True

    print(args)
    config = yaml_util.load_yaml_file(args.config)
    loaders = main_util.get_data_loaders(config, distributed)
    train_loader, valid_loader, test_loader = loaders
    # A 'mimic_model' section takes precedence over the plain 'model' one.
    if 'mimic_model' in config:
        model_config = config['mimic_model']
        model = mimic_util.get_mimic_model_easily(config, device)
    else:
        model_config = config['model']
        model = module_util.get_model(config, device)

    model_type, best_valid_acc, start_epoch, ckpt_file_path = \
        module_util.resume_from_ckpt(model, model_config, args.init)
    train_config = config['train']
    criterion_config = train_config['criterion']
    criterion = func_util.get_loss(criterion_config['type'],
                                   criterion_config['params'])
    if not args.evaluate:
        train(model, train_loader, valid_loader, best_valid_acc, criterion,
              device, distributed, device_ids, train_config, args.epoch,
              start_epoch, args.lr, ckpt_file_path, model_type)
    # Test accuracy is reported whether or not training ran.
    test(model, test_loader, device)
def run(args):
    """Analyze a model's compression rate or running time, depending on the
    requested mode.

    :param args: parsed namespace (cpu, config, pkl, mode, comp_layer).
    :raises ValueError: if args.mode is neither 'comp_rate' nor 'run_time'.
    """
    use_gpu = torch.cuda.is_available() and not args.cpu
    device = torch.device('cuda' if use_gpu else 'cpu')
    if device.type == 'cuda':
        cudnn.benchmark = True

    config = yaml_util.load_yaml_file(args.config)
    dataset_config = config['dataset']
    train_config = config['train']
    test_config = config['test']
    compress_config = test_config['compression']
    input_shape = config['input_shape']
    loaders = dataset_util.get_data_loaders(
        dataset_config, batch_size=train_config['batch_size'],
        compression_type=compress_config['type'],
        compressed_size=compress_config['size'],
        rough_size=train_config['rough_size'],
        reshape_size=input_shape[1:3],
        test_batch_size=test_config['batch_size'],
        jpeg_quality=test_config['jquality'])
    train_loader, valid_loader, test_loader = loaders

    # Prefer a previously pickled model when one exists; otherwise rebuild
    # the model from the config and restore its checkpoint.
    pickle_file_path = args.pkl
    if file_util.check_if_exists(pickle_file_path):
        model = file_util.load_pickle(pickle_file_path).to(device)
    else:
        model = module_util.get_model(config, device)
        resume_from_ckpt(model, config['model'], device)

    model.eval()
    analysis_mode = args.mode
    if analysis_mode == 'run_time':
        analyze_running_time(model, input_shape, args.comp_layer, test_loader, device)
    elif analysis_mode == 'comp_rate':
        analyze_compression_rate(model, input_shape, test_loader, device)
    else:
        raise ValueError('mode argument `{}` is not expected'.format(analysis_mode))
def read_config(config_file_path):
    """Parse a model YAML file and instantiate the model on the CPU.

    :param config_file_path: path to the YAML configuration file.
    :return: tuple of (model, model type string, input shape).
    """
    cfg = yaml_util.load_yaml_file(config_file_path)
    net_cfg = cfg['model']
    # Inception-v3's auxiliary head is unused here, so disable it up front.
    if net_cfg['type'] == 'inception_v3':
        net_cfg['params']['aux_logits'] = False

    model = module_util.get_model(cfg, torch.device('cpu'))
    return model, net_cfg['type'], cfg['input_shape']
# --- Example 4 ---
def get_org_model(teacher_model_config, device):
    """Build the original (teacher) network from its own YAML config and
    restore its checkpoint.

    :param teacher_model_config: dict holding at least a 'config' path.
    :param device: torch.device the model is created on.
    :return: tuple of (model, model type string).
    """
    cfg = yaml_util.load_yaml_file(teacher_model_config['config'])
    net_cfg = cfg['model']
    # The auxiliary classifier of inception_v3 is not needed here.
    if net_cfg['type'] == 'inception_v3':
        net_cfg['params']['aux_logits'] = False

    org_model = module_util.get_model(cfg, device)
    resume_from_ckpt(net_cfg['ckpt'], org_model)
    return org_model, net_cfg['type']
# --- Example 5 ---
def get_teacher_model(teacher_model_config, input_shape, device):
    """Load the teacher network, restore its checkpoint, and return the
    extracted teacher sub-model together with the teacher's model type.

    :param teacher_model_config: dict holding at least a 'config' path.
    :param input_shape: expected input shape used for extraction.
    :param device: torch.device the model is created on.
    :return: tuple of (extracted teacher model, model type string).
    """
    cfg = yaml_util.load_yaml_file(teacher_model_config['config'])
    net_cfg = cfg['model']
    # The auxiliary classifier of inception_v3 is not needed here.
    if net_cfg['type'] == 'inception_v3':
        net_cfg['params']['aux_logits'] = False

    full_model = module_util.get_model(cfg, device)
    resume_from_ckpt(net_cfg['ckpt'], full_model)
    teacher = extract_teacher_model(full_model, input_shape, device,
                                    teacher_model_config)
    return teacher, net_cfg['type']
# --- Example 6 ---
def get_head_model(config, input_shape, device):
    """Rebuild the original network and return its head portion up to the
    configured partition index.

    :param config: dict with an 'org_model' section ('config', 'partition_idx').
    :param input_shape: expected input shape used for extraction.
    :param device: torch.device the model is created on.
    :return: the extracted head model.
    """
    org_model_config = config['org_model']
    loaded_cfg = yaml_util.load_yaml_file(org_model_config['config'])
    net_cfg = loaded_cfg['model']
    # The auxiliary classifier of inception_v3 is not needed here.
    if net_cfg['type'] == 'inception_v3':
        net_cfg['params']['aux_logits'] = False

    base_model = module_util.get_model(loaded_cfg, device)
    module_util.resume_from_ckpt(base_model, net_cfg, False)
    return extract_head_model(base_model, input_shape, device,
                              org_model_config['partition_idx'])
# --- Example 7 ---
def get_extended_model(autoencoder,
                       config,
                       input_shape,
                       device,
                       skip_bottleneck_size=False):
    """Rebuild the original network, restore its checkpoint, and splice the
    given autoencoder in at the configured partition index.

    :param autoencoder: autoencoder module to insert.
    :param config: dict with an 'org_model' section ('config', 'partition_idx').
    :param input_shape: expected input shape used when extending.
    :param device: torch.device the model is created on.
    :param skip_bottleneck_size: forwarded to extend_model.
    :return: tuple of (extended model, original model).
    """
    org_model_config = config['org_model']
    loaded_cfg = yaml_util.load_yaml_file(org_model_config['config'])
    net_cfg = loaded_cfg['model']
    # The auxiliary classifier of inception_v3 is not needed here.
    if net_cfg['type'] == 'inception_v3':
        net_cfg['params']['aux_logits'] = False

    org_model = module_util.get_model(loaded_cfg, device)
    module_util.resume_from_ckpt(org_model, net_cfg, False)
    extended = extend_model(autoencoder, org_model, input_shape, device,
                            org_model_config['partition_idx'],
                            skip_bottleneck_size)
    return extended, org_model
def run(args):
    """Load a model described by a YAML config and split it into head/tail
    sub-models for sensor/edge deployment, optionally converting the full
    model for another device afterwards.

    :param args: parsed namespace; expected attributes include config, scpu,
        ecpu, partition, head, tail, org, mimic, test, spbit, model, device.
    """
    print(args)
    config = yaml_util.load_yaml_file(args.config)
    # NOTE(review): these raise if CUDA is unavailable and the cpu flags are
    # unset — presumably intentional, since the user explicitly opted in.
    sensor_device = torch.device('cpu' if args.scpu else 'cuda')
    edge_device = torch.device('cpu' if args.ecpu else 'cuda')
    partition_idx = args.partition
    head_output_file_path = args.head
    tail_output_file_path = args.tail
    input_shape = config['input_shape']
    # Fix: the original passed `None` as the device when CUDA was
    # unavailable; fall back to the CPU device like every other entry point.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if 'teacher_model' not in config:
        model = module_util.get_model(config, device)
        module_util.resume_from_ckpt(model, config['model'], False)
    else:
        model, teacher_model_type =\
            mimic_util.get_org_model(config['teacher_model'], device)
        if args.org and head_output_file_path is not None and tail_output_file_path is not None:
            split_original_model(model, input_shape, device, config,
                                 sensor_device, edge_device, partition_idx,
                                 head_output_file_path, tail_output_file_path,
                                 args.test, args.spbit)
        elif args.mimic:
            # Replace the original model with the mimic (student) model
            # before splitting.
            model = mimic_util.get_mimic_model_easily(config, sensor_device)
            student_model_config = config['mimic_model']
            load_ckpt(student_model_config['ckpt'], model=model, strict=True)
            split_original_model(model, input_shape, device, config,
                                 sensor_device, edge_device, partition_idx,
                                 head_output_file_path, tail_output_file_path,
                                 args.test, args.spbit)
        elif head_output_file_path is not None and tail_output_file_path is not None:
            split_within_student_model(model, input_shape, device, config,
                                       teacher_model_type, sensor_device,
                                       edge_device, partition_idx,
                                       head_output_file_path,
                                       tail_output_file_path, args.test,
                                       args.spbit)

    # Optional post-split conversion of the (possibly replaced) model.
    if args.model is not None and args.device is not None:
        convert_model(model, torch.device(args.device), args.model)
def run(args):
    """Restore a trained (possibly mimic) model and run the mean-inputs
    analysis on the chosen dataset split.

    :param args: parsed namespace (cpu, config, split, method, dim, output).
    """
    use_gpu = torch.cuda.is_available() and not args.cpu
    device = torch.device('cuda' if use_gpu else 'cpu')
    if device.type == 'cuda':
        cudnn.benchmark = True

    config = yaml_util.load_yaml_file(args.config)
    loaders = main_util.get_data_loaders(config, False)
    train_loader, valid_loader, test_loader = loaders
    input_shape = config['input_shape']
    # A 'mimic_model' section takes precedence over the plain 'model' one.
    if 'mimic_model' in config:
        target_config = config['mimic_model']
        model = mimic_util.get_mimic_model_easily(config, device)
    else:
        target_config = config['model']
        model = module_util.get_model(config, device)

    model_type, _, _, _ = module_util.resume_from_ckpt(model, target_config,
                                                       False)
    split_name = args.split
    # Any split name other than 'train'/'valid' falls through to the test set.
    loader_by_split = {'train': train_loader, 'valid': valid_loader}
    data_loader = loader_by_split.get(split_name, test_loader)
    analyze_with_mean_inputs(model, input_shape, data_loader, device,
                             split_name, args.method, args.dim, model_type,
                             args.output)