Example #1
0
def load(opt):
    """Construct a model from an option dict.

    Extracts the model-scoped options, removes the 'model_name' key, and
    forwards the remaining options to the model factory.
    """
    model_config = filter_opt(opt, 'model')
    # pop both reads and removes the name in one step
    name = model_config.pop('model_name')
    return get_model(name, model_config)
def load(opt):
    """
    Load the model for the options
    """
    cfg = filter_opt(opt, 'model')
    chosen_name = cfg['model_name']
    # the factory must not see the selector key itself
    del cfg['model_name']
    return get_model(chosen_name, cfg)
def main(opt):
    """Evaluate a trained model on the test split.

    Builds the model architecture from ``opt``, restores weights from
    ``opt['model.model_path']``, reconstructs the data options from the
    ``opt.json`` saved next to the checkpoint, runs evaluation, and prints
    each logged metric as mean +/- 95% confidence interval over episodes.
    """
    # load model architecture, then restore the trained weights
    model = model_utils.load(opt)
    state_dict = torch.load(opt['model.model_path'])
    model.load_state_dict(state_dict)
    model.eval()

    # load the training-time options saved alongside the checkpoint
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']), 'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.
    # BUG FIX: on Python 3, map() returns a one-shot lazy iterator; wrap in
    # list() so 'model.x_dim' can be indexed and iterated repeatedly downstream.
    model_opt['model.x_dim'] = list(map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options from the model's saved 'data.*' settings
    data_opt = { 'data.' + k: v for k,v in filter_opt(model_opt, 'data').items() }

    # test-time episode fields and their train-time fallbacks
    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    # precedence: explicit CLI value > saved test-time value > saved train-time value
    for k,v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print("Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes".format(
        data_opt['data.test_way'], data_opt['data.test_shot'],
        data_opt['data.test_query'], data_opt['data.test_episodes']))

    # fixed seed so evaluation episodes are reproducible
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()

    meters = { field: tnt.meter.AverageValueMeter() for field in model_opt['log.fields'] }

    model_utils.evaluate(model, data['test'], meters, desc="test")

    # 1.96 * std / sqrt(N) is the half-width of the 95% CI over N episodes
    for field, meter in meters.items():
        mean, std = meter.value()
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(field, mean, 1.96 * std / math.sqrt(data_opt['data.test_episodes'])))
Example #4
0
def loader(opt, splits):
    """Build episodic DataLoaders for the requested dataset splits.

    For 'val'/'test' splits, non-zero 'data.test_*' options override the
    train-time way/shot/query settings.

    Returns a dict mapping split name -> torch.utils.data.DataLoader.

    Raises:
        ValueError: if opt['data.dataset'] is not a supported dataset.
    """
    ret = {}
    for split in splits:
        # test-time overrides apply only to val/test, and only when non-zero
        if split in ['val', 'test'] and opt['data.test_way'] != 0:
            n_way = opt['data.test_way']
        else:
            n_way = opt['data.way']

        if split in ['val', 'test'] and opt['data.test_shot'] != 0:
            n_support = opt['data.test_shot']
        else:
            n_support = opt['data.shot']

        if split in ['val', 'test'] and opt['data.test_query'] != 0:
            n_query = opt['data.test_query']
        else:
            n_query = opt['data.query']

        if split in ['val', 'test']:
            n_episodes = opt['data.test_episodes']
        else:
            n_episodes = opt['data.train_episodes']

        if opt['data.dataset'] == 'googlespeech':
            speech_args = filter_opt(opt, 'speech')
            data_dir = os.path.join(os.path.dirname(__file__),
                                    '../../data/speech_commands/core')
            class_file = os.path.join(os.path.dirname(__file__),
                                      '../../data/speech_commands/core',
                                      split + '.txt')
            ds = FewShotSpeechDataset(data_dir, class_file, n_support, n_query,
                                      opt['data.cuda'], speech_args)
        else:
            # BUG FIX: previously 'ds' was left unbound for any other dataset,
            # producing a confusing NameError below; fail fast instead.
            raise ValueError("unsupported dataset: {:s}".format(opt['data.dataset']))

        if opt['data.sequential']:
            sampler = SequentialBatchSampler(len(ds))
        else:
            sampler = EpisodicSpeechBatchSampler(
                len(ds),
                n_way,
                n_episodes,
                include_silence=opt['speech.include_silence'],
                include_unknown=opt['speech.include_unknown'])

        # use num_workers=0, otherwise may receive duplicate episodes
        ret[split] = torch.utils.data.DataLoader(ds,
                                                 batch_sampler=sampler,
                                                 num_workers=0)

    return ret
Example #5
0
def main(opt):
    """Evaluate a fully-serialized model checkpoint on the test split.

    Loads the whole model object from ``opt['model.model_path']`` (not just a
    state_dict), reconstructs the data options from the ``opt.json`` saved
    next to the checkpoint, runs evaluation, and prints each logged metric as
    mean +/- 95% confidence interval over episodes.
    """
    # load model (checkpoint is the whole serialized module)
    model = torch.load(opt['model.model_path'])
    model.eval()

    # load the training-time options saved alongside the checkpoint
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.
    # BUG FIX: on Python 3, map() returns a one-shot lazy iterator; wrap in
    # list() so 'model.x_dim' can be indexed and iterated repeatedly downstream.
    model_opt['model.x_dim'] = list(map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options from the model's saved 'data.*' settings
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    # test-time episode fields and their train-time fallbacks
    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    # precedence: explicit CLI value > saved test-time value > saved train-time value
    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # fixed seed so evaluation episodes are reproducible
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)
    print(data_opt)
    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, data['test'], meters, desc="test")

    # 1.96 * std / sqrt(N) is the half-width of the 95% CI over N episodes
    for field, meter in meters.items():
        mean, std = meter.value()
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))
Example #6
0
def load(opt, key='model_name'):
    """Construct a model from an option dict.

    Looks the model name up under ``key`` inside the model-scoped options,
    removes that key, and forwards the remaining options to the factory.
    """
    model_config = filter_opt(opt, 'model')
    # pop both reads and removes the selector key in one step
    chosen_name = model_config.pop(key)
    return get_model(chosen_name, model_config)
Example #7
0
def main(opt):
    """Evaluate a serialized model on the test split and report per-class stats.

    Loads the whole model object from ``opt['model.model_path']``, rebuilds the
    data options from the ``opt.json`` saved next to the checkpoint, runs
    evaluation in either 'protonet' or 'feat' stage (per ``opt['stage']``),
    then prints per-field mean +/- 95% CI, per-class accuracy/precision, and
    macro/micro average precision.
    """
    # load model (checkpoint is the whole serialized module)
    model = torch.load(opt['model.model_path'])
    model.eval()

    # load the training-time options saved alongside the checkpoint
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.
    # BUG FIX: on Python 3, map() returns a one-shot lazy iterator; wrap in
    # list() so 'model.x_dim' can be indexed and iterated repeatedly downstream.
    model_opt['model.x_dim'] = list(map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options from the model's saved 'data.*' settings
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    # test-time episode fields and their train-time fallbacks
    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    # precedence: explicit CLI value > saved test-time value > saved train-time value
    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # fixed seed so evaluation episodes are reproducible
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }
    # evaluation stage is chosen by the caller; both paths return the same tuple
    if opt['stage'] == 'protonet':
        _, class_acc, class_prec, prec_micro = model_utils.evaluate(
            model,
            data['test'],
            meters,
            stage='protonet',
            desc="test",
            evaluation=True)
    else:
        _, class_acc, class_prec, prec_micro = model_utils.evaluate(
            model,
            data['test'],
            meters,
            stage='feat',
            desc='test',
            evaluation=True)
    # 1.96 * std / sqrt(N) is the half-width of the 95% CI over N episodes
    for field, meter in meters.items():
        mean, std = meter.value()
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))

    mean_prec = 0
    n = 0
    for k in class_acc.keys():
        print('class {} acc: {:0.4f}'.format(k, class_acc[k]))
    for k in class_prec.keys():
        mean_prec += class_prec[k]
        n += 1
        print('class {} prec: {:0.4f}'.format(k, class_prec[k]))
    # BUG FIX: guard against division by zero when no per-class precision
    # was reported (previously raised ZeroDivisionError).
    mean_prec = mean_prec / n if n else 0.0
    print('Average prec(macro): {:0.4f}; Average prec(micro): {:0.4f}'.format(
        mean_prec, prec_micro))
Example #8
0
def main(opt, augment_stn):
    """Evaluate a serialized model on the test split, optionally with an STN.

    Loads the whole model object from ``opt['model.model_path']``; when the
    saved options request it (and ``augment_stn`` is true), also loads the
    companion spatial-transformer checkpoint ('<path>_stn.pt'). Prints each
    logged metric as mean +/- 95% confidence interval over episodes.
    """
    # load model (checkpoint is the whole serialized module)
    model = torch.load(opt['model.model_path'])
    model.eval()

    # load the training-time options saved alongside the checkpoint
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.
    # BUG FIX: on Python 3, map() returns a one-shot lazy iterator; wrap in
    # list() so 'model.x_dim' can be indexed and iterated repeatedly downstream.
    model_opt['model.x_dim'] = list(map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')
    if model_opt['run_stn']:
        print("Loading STN here")
        # best-effort load: fall back to no STN if the companion file is
        # missing or unreadable.
        # BUG FIX: bare 'except:' also swallowed KeyboardInterrupt/SystemExit;
        # narrowed to Exception.
        try:
            stn_model = torch.load(opt['model.model_path'].replace(
                '.pt', '_stn.pt'))
            stn_model.eval()
        except Exception:
            stn_model = None
    else:
        stn_model = None

    # Augment overrides it anyway
    if not augment_stn:
        stn_model = None

    # construct data options from the model's saved 'data.*' settings
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    # test-time episode fields and their train-time fallbacks
    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    # precedence: explicit CLI value > saved test-time value > saved train-time value
    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # caller-supplied seed so evaluation episodes are reproducible
    torch.manual_seed(opt['seed'])
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(opt['seed'])

    data = data_utils.load(data_opt, ['test'])

    if data_opt['data.cuda']:
        model.cuda()
        if stn_model is not None:
            stn_model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, stn_model, data['test'], meters, desc="test")

    # 1.96 * std / sqrt(N) is the half-width of the 95% CI over N episodes
    for field, meter in meters.items():
        mean, std = meter.value()
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))
def main(opt):
    """Evaluate a model on the birds test split with fixed episode settings.

    Builds the model from ``opt`` via model_utils.load, rebuilds the data
    options from the ``opt.json`` saved next to the checkpoint, then overrides
    the episode settings (5-way, 5-shot, 36 query, 1000 episodes) and evaluates
    on a PytorchBirdsDataLoader, printing mean +/- 95% CI per logged field.
    """
    # load model architecture (weights restored by model_utils.load)
    print('opt:', opt)
    model = model_utils.load(opt)
    model.eval()

    # load the training-time options saved alongside the checkpoint
    model_opt_file = os.path.join(os.path.dirname(opt['model.model_path']),
                                  'opt.json')
    with open(model_opt_file, 'r') as f:
        model_opt = json.load(f)

    # Postprocess arguments.
    # BUG FIX: on Python 3, map() returns a one-shot lazy iterator; wrap in
    # list() so 'model.x_dim' can be indexed and iterated repeatedly downstream.
    model_opt['model.x_dim'] = list(map(int, model_opt['model.x_dim'].split(',')))
    model_opt['log.fields'] = model_opt['log.fields'].split(',')

    # construct data options from the model's saved 'data.*' settings
    data_opt = {
        'data.' + k: v
        for k, v in filter_opt(model_opt, 'data').items()
    }

    # test-time episode fields and their train-time fallbacks
    episode_fields = {
        'data.test_way': 'data.way',
        'data.test_shot': 'data.shot',
        'data.test_query': 'data.query',
        'data.test_episodes': 'data.train_episodes'
    }

    # precedence: explicit CLI value > saved test-time value > saved train-time value
    for k, v in episode_fields.items():
        if opt[k] != 0:
            data_opt[k] = opt[k]
        elif model_opt[k] != 0:
            data_opt[k] = model_opt[k]
        else:
            data_opt[k] = model_opt[v]

    # added for birds dataset: fixed evaluation protocol overrides the above
    data_opt['data.test_way'] = 5
    data_opt['data.test_shot'] = 5
    data_opt['data.test_query'] = (41 - 5)  # max amount possible
    data_opt[
        'data.test_episodes'] = 1000  # average over 1000 randomly generated episodes

    print(
        "Evaluating {:d}-way, {:d}-shot with {:d} query examples/class over {:d} episodes"
        .format(data_opt['data.test_way'], data_opt['data.test_shot'],
                data_opt['data.test_query'], data_opt['data.test_episodes']))

    # fixed seed so evaluation episodes are reproducible
    torch.manual_seed(1234)
    if data_opt['data.cuda']:
        torch.cuda.manual_seed(1234)

    test_way = data_opt['data.test_way']
    test_shot = data_opt['data.test_shot']
    test_query = data_opt['data.test_query']
    test_episodes = data_opt['data.test_episodes']
    print('test way:', test_way, 'test shot:', test_shot, 'test query:',
          test_query, 'test_episodes', test_episodes)
    # figure out how to go through all the data with 5 shot
    data = PytorchBirdsDataLoader(n_episodes=test_episodes,
                                  n_way=test_way,
                                  n_query=test_query,
                                  n_support=test_shot)

    if data_opt['data.cuda']:
        model.cuda()

    meters = {
        field: tnt.meter.AverageValueMeter()
        for field in model_opt['log.fields']
    }

    model_utils.evaluate(model, data, meters, desc="test")

    # 1.96 * std / sqrt(N) is the half-width of the 95% CI over N episodes
    for field, meter in meters.items():
        mean, std = meter.value()
        print("test {:s}: {:0.6f} +/- {:0.6f}".format(
            field, mean,
            1.96 * std / math.sqrt(data_opt['data.test_episodes'])))