Example 1
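All five examples below are excerpts from the same training/evaluation scripts, so their module-level context is not shown. A minimal sketch of the imports they appear to rely on follows; the standard-library and PyTorch lines can be inferred directly from the code, while the repo-local helper paths in the comments are assumptions:

import os
import random
import warnings

import torch
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torchvision.models as models
from sklearn.metrics.pairwise import cosine_similarity

# Repo-local helpers (module paths are hypothetical):
# from PyUtils.file_utils import get_stem, get_dir, get_date_str
# from CNNs.utils import CNN_utils, log_utils, load_state_dict, parse_config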
def main():

    import argparse
    parser = argparse.ArgumentParser(
        description="PyTorch image CNN training from config files")
    parser.add_argument(
        '--config_file',
        required=True,
        help="This script only accepts parameters from JSON files")
    input_args = parser.parse_args()

    config_file = input_args.config_file

    args = parse_config(config_file)
    if args.name is None:
        args.name = get_stem(config_file)

    torch.set_default_tensor_type('torch.FloatTensor')
    best_prec1 = 0

    args.script_name = get_stem(__file__)
    current_time_str = get_date_str()

    print_func = print

    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    if args.pretrained:
        print_func("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True,
                                           num_classes=args.num_classes)
    else:
        print_func("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=False,
                                           num_classes=args.num_classes)

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            # model = torch.nn.DataParallel(model).cuda()
            model = model.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print_func("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            import collections
            if isinstance(checkpoint, collections.OrderedDict):
                load_state_dict(model,
                                checkpoint,
                                exclude_layers=['fc.weight', 'fc.bias'])

            else:
                load_state_dict(
                    model,
                    checkpoint['state_dict'],
                    exclude_layers=['module.fc.weight', 'module.fc.bias'])
                print_func("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
        else:
            print_func("=> no checkpoint found at '{}'".format(args.resume))
            return
    else:
        print_func(
            "=> This script is for fine-tuning only, please double check '{}'".
            format(args.resume))
        print_func("Now using randomly initialized parameters!")

    cudnn.benchmark = True

    from PyUtils.pickle_utils import loadpickle
    from PublicEmotionDatasets.Deepemotion.constants import emotion2idx, idx2emotion
    from PyUtils.dict_utils import string_list2dict
    import numpy as np
    from torchvision.datasets.folder import default_loader
    tag_wordvectors = loadpickle(args.tag_embeddings)
    tag_words = []
    tag_matrix = []
    label_words = []
    label_matrix = []

    for x_tag in tag_wordvectors:
        tag_words.append(x_tag)
        tag_matrix.append(tag_wordvectors[x_tag])
        if x_tag in emotion2idx:
            label_words.append(x_tag)
            label_matrix.append(tag_wordvectors[x_tag])
    idx2tag, tag2idx = string_list2dict(tag_words)
    idx2label, label2idx = string_list2dict(label_words)
    tag_matrix = np.array(tag_matrix)
    label_matrix = np.array(label_matrix)
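    # each pickled embedding is stored as a (1, D) array, so stacking gives
    # (N, 1, D); the squeeze calls below reduce this to (N, D) for
    # cosine_similarity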
    label_matrix = label_matrix.squeeze(1)
    tag_matrix = tag_matrix.squeeze(1)
    val_list = loadpickle(args.val_file)
    from CNNs.datasets.multilabel import get_val_simple_transform
    val_transform = get_val_simple_transform()
    model.eval()

    correct = 0
    total = len(val_list) * 1.0
    for i, (input_image_file, target, _) in enumerate(val_list):
        # measure data loading time

        image_path = os.path.join(args.data_dir, input_image_file)
        input_image = default_loader(image_path)
        input_image = val_transform(input_image)

        if args.gpu is not None:
            input_image = input_image.cuda(args.gpu, non_blocking=True)
        input_image = input_image.unsqueeze(0).cuda()

        # target_idx = target.nonzero() [:,1]

        # compute output
        output, output_proj = model(input_image)

        output_proj = output_proj.cpu().data.numpy()
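        # rank labels by cosine similarity between the projected visual
        # feature and each label's word embedding; the top-1 label is the
        # prediction, and the 10 nearest tags are printed for inspection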

        dot_product_label = cosine_similarity(output_proj, label_matrix)[0]
        output_label = np.argmax(dot_product_label)
        if output_label == target:
            correct += 1

        dot_product_tag = cosine_similarity(output_proj, tag_matrix)[0]
        out_tags = np.argsort(dot_product_tag)[::-1][:10]

        print("* {} Image: {} GT label: {}, predicted label: {}".format(
            i, input_image_file, idx2emotion[target], idx2label[output_label]))
        print(" == closest tags: {}".format(', '.join([
            '{}({:.02f})'.format(idx2tag[x], dot_product_tag[x])
            for x in out_tags
        ])))
    print("Accuracy {:.4f}".format(correct / total))
Example 2
def main():

    import argparse
    parser = argparse.ArgumentParser(
        description="PyTorch image CNN training from config files")
    parser.add_argument(
        '--config_file',
        required=True,
        help="This script only accepts parameters from JSON files")
    input_args = parser.parse_args()

    config_file = input_args.config_file

    args = parse_config(config_file)
    if args.name is None:
        args.name = get_stem(config_file)

    torch.set_default_tensor_type('torch.FloatTensor')
    # best_prec1 = 0

    args.script_name = get_stem(__file__)
    # current_time_str = get_date_str()

    print_func = print

    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    if args.pretrained:
        print_func("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True,
                                           num_classes=args.num_classes)
    else:
        print_func("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=False,
                                           num_classes=args.num_classes)

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
            # model = model.cuda()

    if args.visual_model:
        if os.path.isfile(args.visual_model):
            print_func("=> loading checkpoint '{}'".format(args.visual_model))
            checkpoint = torch.load(args.visual_model)
            model.load_state_dict(checkpoint['state_dict'], strict=True)
        else:
            print_func("=> no checkpoint found at '{}'".format(
                args.visual_model))
            return
    else:
        print_func(
            "=> This script is for fine-tuning only, please double check '{}'".
            format(args.visual_model))
        print_func("Now using randomly initialized parameters!")

    cudnn.benchmark = True

    from PyUtils.pickle_utils import loadpickle

    import numpy as np
    from PublicEmotionDatasets.Emotic.constants import emotion_full_words_690 as emotion_self_words

    from torchvision.datasets.folder import default_loader
    tag_wordvectors = loadpickle(args.text_embed)

    print_func(" => loading word2vec parameters: {}".format(args.text_embed))

    emotic_emotion_explaintations = {}
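    # each EMOTIC category is described by a comma-separated word list; its
    # 'target_matrix' stacks the word embeddings, and the per-image score is
    # the mean cosine similarity against that matrix (see the loop below)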

    for x_key in emotion_self_words:
        x_words = emotion_self_words[x_key].split(',')
        x_feature = [tag_wordvectors[x] for x in x_words]

        item = {}
        item['pred'] = []
        item['label'] = []
        item['target_matrix'] = np.array(x_feature)
        item['description'] = x_words
        emotic_emotion_explaintations[x_key] = item

    val_list = loadpickle(args.val_file)
    image_directory = args.data_dir
    from CNNs.datasets.multilabel import get_val_simple_transform
    val_transform = get_val_simple_transform()
    model.eval()

    import tqdm
    for i, (input_image_file, target, _,
            _) in tqdm.tqdm(enumerate(val_list),
                            desc="Evaluating Peace",
                            total=len(val_list)):
        # measure data loading time

        image_path = os.path.join(image_directory, input_image_file)
        input_image = default_loader(image_path)
        input_image = val_transform(input_image)

        if args.gpu is not None:
            input_image = input_image.cuda(args.gpu, non_blocking=True)
        input_image = input_image.unsqueeze(0).cuda()

        # target_idx = target.nonzero() [:,1]

        # compute output
        output, output_proj = model(input_image)

        output_proj = output_proj.cpu().data.numpy()
        target_labels = set([x[0] for x in target.most_common()])

        for x_key in emotic_emotion_explaintations:

            dot_product_label = cosine_similarity(
                output_proj,
                emotic_emotion_explaintations[x_key]['target_matrix'])[0]
            pred_score = np.average(dot_product_label)
            emotic_emotion_explaintations[x_key]['pred'].append(pred_score)
            if x_key in target_labels:
                emotic_emotion_explaintations[x_key]['label'].append(1)
            else:
                emotic_emotion_explaintations[x_key]['label'].append(0)

    from sklearn.metrics import average_precision_score
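    # per-category average precision over the full val set; a category with
    # no positive ground-truth labels makes average_precision_score return
    # NaN, so it is reported and excluded from the mean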
    full_AP = []
    for x_key in emotic_emotion_explaintations:
        full_pred = np.array(emotic_emotion_explaintations[x_key]['pred'])
        full_label = np.array(emotic_emotion_explaintations[x_key]['label'])
        AP = average_precision_score(full_label, full_pred)
        if np.isnan(AP):
            print("{} is Nan".format(x_key))
            continue
        full_AP.append(AP)
        print("{}\t{:.4f}".format(x_key, AP * 100))
    AvgAP = np.mean(full_AP)
    print("Avg AP: {:.2f}".format(AvgAP * 100))
Example 3
def main():

    import argparse
    parser = argparse.ArgumentParser(
        description="PyTorch image CNN training from config files")
    parser.add_argument(
        '--config_file',
        required=True,
        help="This script only accepts parameters from JSON files")
    input_args = parser.parse_args()

    config_file = input_args.config_file

    args = parse_config(config_file)
    if args.name is None:
        args.name = get_stem(config_file)

    torch.set_default_tensor_type('torch.FloatTensor')
    best_prec1 = 0

    args.script_name = get_stem(__file__)
    current_time_str = get_date_str()
    if args.save_directory is None:
        save_directory = get_dir(
            os.path.join(project_root, args.ckpts_dir,
                         '{:s}'.format(args.name),
                         '{:s}-{:s}'.format(args.ID, current_time_str)))
    else:
        save_directory = get_dir(
            os.path.join(project_root, args.ckpts_dir, args.save_directory))

    print("Save to {}".format(save_directory))
    log_file = os.path.join(save_directory,
                            'log-{0}.txt'.format(current_time_str))
    logger = log_utils.get_logger(log_file)
    log_utils.print_config(vars(args), logger)

    print_func = logger.info
    print_func('ConfigFile: {}'.format(config_file))
    args.log_file = log_file

    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    if args.pretrained:
        print_func("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True,
                                           num_classes=args.num_classes)
    else:
        print_func("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=False,
                                           num_classes=args.num_classes)

    if args.freeze:
        model = CNN_utils.freeze_all_except_fc(model)

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        print_func(
            'Please specify a single GPU, since this script runs with batch size 1'
        )
        return

    if args.resume:
        if os.path.isfile(args.resume):
            print_func("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            import collections
            if not args.evaluate:
                if isinstance(checkpoint, collections.OrderedDict):
                    load_state_dict(model,
                                    checkpoint,
                                    exclude_layers=['fc.weight', 'fc.bias'])

                else:
                    load_state_dict(
                        model,
                        checkpoint['state_dict'],
                        exclude_layers=['module.fc.weight', 'module.fc.bias'])
                    print_func("=> loaded checkpoint '{}' (epoch {})".format(
                        args.resume, checkpoint['epoch']))
            else:
                if isinstance(checkpoint, collections.OrderedDict):
                    load_state_dict(model, checkpoint, strict=True)

                else:
                    load_state_dict(model,
                                    checkpoint['state_dict'],
                                    strict=True)
                    print_func("=> loaded checkpoint '{}' (epoch {})".format(
                        args.resume, checkpoint['epoch']))
        else:
            print_func("=> no checkpoint found at '{}'".format(args.resume))
            return
    else:
        print_func(
            "=> This script is for fine-tuning only, please double check '{}'".
            format(args.resume))
        print_func("Now using randomly initialized parameters!")

    cudnn.benchmark = True

    model_total_params = sum(p.numel() for p in model.parameters())
    model_grad_params = sum(p.numel() for p in model.parameters()
                            if p.requires_grad)
    print_func("Total Parameters: {0}\t Gradient Parameters: {1}".format(
        model_total_params, model_grad_params))

    # Data loading code
    # val_dataset = get_instance(custom_datasets, '{0}'.format(args.valloader), args)
    from PyUtils.pickle_utils import loadpickle
    from torchvision.datasets.folder import default_loader

    val_dataset = loadpickle(args.val_file)
    image_directory = args.data_dir
    from CNNs.datasets.multilabel import get_val_simple_transform
    val_transform = get_val_simple_transform()
    import tqdm
    import numpy as np

    if args.individual_feat:
        feature_save_directory = get_dir(
            os.path.join(save_directory, 'individual-features'))
        created_paths = set()
    else:
        data_dict = {}
        feature_save_directory = os.path.join(save_directory, 'feature.pkl')

    model.eval()
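    # feature extraction: run each validation image through the network one
    # at a time (batch size 1) and store the output either as one .npy file
    # per image or in a single pickled dict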

    for s_data in tqdm.tqdm(val_dataset, desc="Extracting Features"):
        if s_data is None:
            continue

        image_path = os.path.join(image_directory, s_data[0])

        try:
            input_image = default_loader(image_path)
        except Exception:
            print("WARN: {} is problematic, skipping!".format(image_path))
            continue

        input_image = val_transform(input_image)

        if args.gpu is not None:
            input_image = input_image.cuda(args.gpu, non_blocking=True)

        output = model(input_image.unsqueeze_(0))
        output = output.cpu().data.numpy()

        if args.individual_feat:
            out_path = os.path.join(feature_save_directory,
                                    '{}.npy'.format(s_data[0]))
            # s_data[0] may contain subdirectories, so create the output
            # directory once before saving
            out_dir = os.path.dirname(out_path)
            if out_dir not in created_paths:
                get_dir(out_dir)
                created_paths.add(out_dir)
            np.save(out_path, output)
        else:
            data_dict[s_data[0]] = output

    if args.individual_feat:
        print_func("Done")
    else:
        from PyUtils.pickle_utils import save2pickle
        print_func("Saving to a single big file!")

        save2pickle(feature_save_directory, data_dict)
        print_func("Done")
Example 4
def main():

    import argparse
    parser = argparse.ArgumentParser(
        description="PyTorch image CNN training from config files")
    parser.add_argument(
        '--config_file',
        required=True,
        help="This script only accepts parameters from JSON files")
    input_args = parser.parse_args()

    config_file = input_args.config_file

    args = parse_config(config_file)
    if args.name is None:
        args.name = get_stem(config_file)

    torch.set_default_tensor_type('torch.FloatTensor')
    best_prec1 = 0

    args.script_name = get_stem(__file__)
    current_time_str = get_date_str()

    print_func = print

    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    if args.pretrained:
        print_func("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True,
                                           num_classes=args.num_classes)
    else:
        print_func("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=False,
                                           num_classes=args.num_classes)

    if args.gpu is not None:
        model = model.cuda(args.gpu)
    elif args.distributed:
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            # model = torch.nn.DataParallel(model).cuda()
            model = model.cuda()

    if args.resume:
        if os.path.isfile(args.resume):
            print_func("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            import collections
            if isinstance(checkpoint, collections.OrderedDict):
                load_state_dict(model,
                                checkpoint,
                                exclude_layers=['fc.weight', 'fc.bias'])

            else:
                load_state_dict(
                    model,
                    checkpoint['state_dict'],
                    exclude_layers=['module.fc.weight', 'module.fc.bias'])
                print_func("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
        else:
            print_func("=> no checkpoint found at '{}'".format(args.resume))
            return
    else:
        print_func(
            "=> This script is for fine-tuning only, please double check '{}'".
            format(args.resume))
        print_func("Now using randomly initialized parameters!")

    cudnn.benchmark = True

    from PyUtils.pickle_utils import loadpickle
    # from PublicEmotionDatasets.Deepemotion.constants import emotion2idx, idx2emotion
    from PyUtils.dict_utils import string_list2dict
    import numpy as np
    from torchvision.datasets.folder import default_loader
    tag_wordvectors = loadpickle(
        '/home/zwei/Dev/AttributeNet3/TextClassification/visualizations/Embeddings/FullVocab_BN_transformed_l2_regularization.pkl'
    )
    tag_words = []
    tag_matrix = []
    label_words = []
    label_matrix = []
    from TextClassification.model_DAN_2constraints import CNN
    text_ckpt = torch.load(
        '/home/zwei/Dev/AttributeNet3/TextClassification/models/model_feature_regularization.pth.tar'
    )
    text_saved_model = text_ckpt['model']
    params = {
        "MAX_SENT_LEN": text_saved_model['MAX_SENT_LEN'],
        "BATCH_SIZE": text_saved_model['BATCH_SIZE'],
        "WORD_DIM": text_saved_model['WORD_DIM'],
        "FILTER_NUM": text_saved_model['FILTER_NUM'],
        "VOCAB_SIZE": text_saved_model['VOCAB_SIZE'],
        "CLASS_SIZE": text_saved_model['CLASS_SIZE'],
        "DROPOUT_PROB": 0.5,
    }

    text_model = CNN(**params).cuda()

    text_model.load_state_dict(text_saved_model.state_dict(), strict=True)
    embedding_tag2idx = text_ckpt['tag2idx']
    text_model.eval()
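    # text side: keyword strings are mapped to vocabulary indices, padded to
    # MAX_SENT_LEN with an out-of-vocabulary index, and pushed through the
    # text CNN to obtain an embedding for the target category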

    from torch.autograd import Variable

    target_keywords_list = ['peace', 'relaxed', 'satisfied']  # peace

    emotion_category = 'Peace'

    # map keywords to vocabulary indices and pad to MAX_SENT_LEN with an
    # out-of-vocabulary index (mirrors the padding scheme in Example 5)
    target_padded_list = [embedding_tag2idx[x] for x in target_keywords_list] + [
        text_saved_model['VOCAB_SIZE'] + 1
    ] * (text_saved_model['MAX_SENT_LEN'] - len(target_keywords_list))
    target_vector = Variable(
        torch.LongTensor(target_padded_list).unsqueeze(0)).cuda()

    target_feature, _, _ = text_model(target_vector)
    target_feature = target_feature.squeeze(0)

    # Alternative target categories (swap in above to evaluate them):
    #   target_keywords_list = ['engagement', 'curious', 'interested']
    #   emotion_category = 'Engagement'
    #   target_keywords_list = ['embarrassment', 'ashamed', 'guilty', 'shame']
    #   emotion_category = 'Embarrassment'

    for x_tag in target_keywords_list:
        tag_matrix.append(tag_wordvectors[x_tag])

    tag_matrix = np.array(tag_matrix)
    tag_matrix = tag_matrix.squeeze(1)
    val_list = loadpickle(
        '/home/zwei/datasets/PublicEmotion/EMOTIC/z_data/test_image_based_single_person_only.pkl'
    )
    image_directory = '/home/zwei/datasets/PublicEmotion/EMOTIC/images'
    from CNNs.datasets.multilabel import get_val_simple_transform
    val_transform = get_val_simple_transform()
    model.eval()

    full_label = []
    full_pred = []
    import tqdm
    for i, (input_image_file, target, _,
            _) in tqdm.tqdm(enumerate(val_list),
                            desc="Evaluating Peace",
                            total=len(val_list)):
        # measure data loading time

        image_path = os.path.join(image_directory, input_image_file)
        input_image = default_loader(image_path)
        input_image = val_transform(input_image)

        if args.gpu is not None:
            input_image = input_image.cuda(args.gpu, non_blocking=True)
        input_image = input_image.unsqueeze(0).cuda()

        # target_idx = target.nonzero() [:,1]

        # compute output
        output, output_proj = model(input_image)

        output_proj = output_proj.cpu().data.numpy()
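        # the image's score for the target category is its best cosine match
        # against any of the category keywords; AP over the whole val set is
        # computed once the loop finishes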

        dot_product_label = cosine_similarity(output_proj, tag_matrix)[0]
        pred_score = np.max(dot_product_label)
        full_pred.append(pred_score)
        target_labels = set([x[0] for x in target.most_common()])
        if emotion_category in target_labels:
            full_label.append(1)
        else:
            full_label.append(0)

    from sklearn.metrics import average_precision_score
    full_pred = np.array(full_pred)
    full_label = np.array(full_label)
    AP = average_precision_score(full_label, full_pred)
    print("DB")
Example 5
def main():

    import argparse
    parser = argparse.ArgumentParser(
        description="PyTorch image CNN training from config files")
    parser.add_argument(
        '--config_file',
        required=True,
        help="This script only accepts parameters from JSON files")
    input_args = parser.parse_args()

    config_file = input_args.config_file

    args = parse_config(config_file)
    if args.name is None:
        args.name = get_stem(config_file)

    torch.set_default_tensor_type('torch.FloatTensor')
    best_prec1 = 0

    args.script_name = get_stem(__file__)
    current_time_str = get_date_str()

    print_func = print

    if args.device:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.device

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    args.distributed = args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_url,
                                world_size=args.world_size)

    if args.pretrained:
        print_func("=> using pre-trained model '{}'".format(args.arch))
        visual_model = models.__dict__[args.arch](pretrained=True,
                                                  num_classes=args.num_classes)
    else:
        print_func("=> creating model '{}'".format(args.arch))
        visual_model = models.__dict__[args.arch](pretrained=False,
                                                  num_classes=args.num_classes)

    if args.gpu is not None:
        visual_model = visual_model.cuda(args.gpu)
    elif args.distributed:
        visual_model.cuda()
        visual_model = torch.nn.parallel.DistributedDataParallel(visual_model)
    else:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            visual_model.features = torch.nn.DataParallel(
                visual_model.features)
            visual_model.cuda()
        else:
            # model = torch.nn.DataParallel(model).cuda()
            visual_model = visual_model.cuda()

    from PyUtils.pickle_utils import loadpickle

    import numpy as np
    from PublicEmotionDatasets.Emotic.constants import emotion_explainations_words_690 as emotion_self_words

    from torchvision.datasets.folder import default_loader
    tag_wordvectors = loadpickle(
        '/home/zwei/Dev/AttributeNet3/TextClassification/visualizations/Embeddings/FullVocab_BN_transformed_l2_regularization.pkl'
    )
    tag_words = []
    tag_matrix = []
    label_words = []
    label_matrix = []
    from TextClassification.model_DAN_2constraints import CNN_Embed_v2 as CNN
    text_ckpt = torch.load(
        '/home/zwei/Dev/AttributeNet3/TextClassification/models/model_feature_regularization.pth.tar'
    )
    text_saved_model = text_ckpt['model']
    params = {
        "MAX_SENT_LEN": text_saved_model.MAX_SENT_LEN,
        "BATCH_SIZE": text_saved_model.BATCH_SIZE,
        "WORD_DIM": text_saved_model.WORD_DIM,
        "FILTER_NUM": text_saved_model.FILTER_NUM,
        "VOCAB_SIZE": text_saved_model.VOCAB_SIZE,
        "CLASS_SIZE": text_saved_model.CLASS_SIZE,
        "DROPOUT_PROB": 0.5,
    }

    text_generator = CNN(**params).cuda()

    text_generator.load_state_dict(text_saved_model.state_dict(), strict=True)
    embedding_tag2idx = text_ckpt['tag2idx']
    text_generator.eval()

    text_model = Text_Transformation(300, 300, 8)
    text_model = text_model.cuda()
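    # Text_Transformation(300, 300, 8) appears to be a repo-local projection
    # head mapping 300-d text features into the space shared with the visual
    # embedding; the meaning of the third argument is not shown here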

    if args.resume:
        if os.path.isfile(args.resume):
            print_func("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            # text_model.load_state_dict(checkpoint['text_state_dict'])
            load_state_dict(text_model, checkpoint['text_state_dict'])

            import collections
            if isinstance(checkpoint, collections.OrderedDict):
                load_state_dict(visual_model,
                                checkpoint,
                                exclude_layers=['fc.weight', 'fc.bias'])

            else:
                load_state_dict(
                    visual_model,
                    checkpoint['state_dict'],
                    exclude_layers=['module.fc.weight', 'module.fc.bias'])
                print_func("=> loaded checkpoint '{}' (epoch {})".format(
                    args.resume, checkpoint['epoch']))
        else:
            print_func("=> no checkpoint found at '{}'".format(args.resume))
            return
    else:
        print_func(
            "=> This script is for fine-tuning only, please double check '{}'".
            format(args.resume))
        print_func("Now using randomly initialized parameters!")

    cudnn.benchmark = True

    from torch.autograd import Variable

    emotic_emotion_explaintations = {}

    for x_key in emotion_self_words:
        x_words = emotion_self_words[x_key].split(',')
        x_feature = [embedding_tag2idx[x] for x in x_words] + \
                    [text_saved_model.VOCAB_SIZE+1]*(text_saved_model.MAX_SENT_LEN - len(x_words))
        x_feature = Variable(torch.LongTensor(x_feature).unsqueeze(0)).cuda()

        tag_matrix = text_generator(x_feature)
        _, tag_feature = text_model(tag_matrix)
        # tag_matrix = tag_matrix.squeeze(1)
        item = {}
        item['pred'] = []
        item['label'] = []
        item['target_matrix'] = tag_feature.cpu().data.numpy()
        item['description'] = x_words
        emotic_emotion_explaintations[x_key] = item

    val_list = loadpickle(
        '/home/zwei/datasets/PublicEmotion/EMOTIC/z_data/test_image_based_single_person_only.pkl'
    )
    image_directory = '/home/zwei/datasets/PublicEmotion/EMOTIC/images'
    from CNNs.datasets.multilabel import get_val_simple_transform
    val_transform = get_val_simple_transform()
    visual_model.eval()

    import tqdm
    for i, (input_image_file, target, _,
            _) in tqdm.tqdm(enumerate(val_list),
                            desc="Evaluating Peace",
                            total=len(val_list)):
        # measure data loading time

        image_path = os.path.join(image_directory, input_image_file)
        input_image = default_loader(image_path)
        input_image = val_transform(input_image)

        if args.gpu is not None:
            input_image = input_image.cuda(args.gpu, non_blocking=True)
        input_image = input_image.unsqueeze(0).cuda()

        # target_idx = target.nonzero() [:,1]

        # compute output
        output, output_proj = visual_model(input_image)

        output_proj = output_proj.cpu().data.numpy()
        target_labels = set([x[0] for x in target.most_common()])

        for x_key in emotic_emotion_explaintations:

            dot_product_label = cosine_similarity(
                output_proj,
                emotic_emotion_explaintations[x_key]['target_matrix'])[0]
            pred_score = np.average(dot_product_label)
            emotic_emotion_explaintations[x_key]['pred'].append(pred_score)
            if x_key in target_labels:
                emotic_emotion_explaintations[x_key]['label'].append(1)
            else:
                emotic_emotion_explaintations[x_key]['label'].append(0)

    from sklearn.metrics import average_precision_score
    full_AP = []
    for x_key in emotic_emotion_explaintations:
        full_pred = np.array(emotic_emotion_explaintations[x_key]['pred'])
        full_label = np.array(emotic_emotion_explaintations[x_key]['label'])
        AP = average_precision_score(full_label, full_pred)
        if np.isnan(AP):
            print("{} is Nan".format(x_key))
            continue
        full_AP.append(AP)
        print("{}\t{:.4f}".format(x_key, AP * 100))
    AvgAP = np.mean(full_AP)
    print("Avg AP: {:.2f}".format(AvgAP * 100))