コード例 #1
0
def create_models():
    """Build the 5-fold ensemble: load each fold checkpoint and switch it to eval mode."""
    models = []
    for fold in range(5):
        net = create_model(f'../input/quest-models/best_{fold}.pth')
        net.eval()
        models.append(net)
    return models
コード例 #2
0
def load_ensemble_from_checkpoints(checkpoints):
    """Restore one model per checkpoint dict, wrap them in an Ensemble, and move it to the default device.

    Each element of ``checkpoints`` must be a dict whose ``"model"`` key holds a state dict.
    """
    restored = []
    for state in checkpoints:
        net = get_model()
        net.load_state_dict(state["model"])
        restored.append(net)
    ensemble = Ensemble(restored)
    ensemble.to(get_device())
    return ensemble
コード例 #3
0
def load_models(models_path, arch=None, n_folds=3, device='cuda'):
    """Load one ``AudioClassifier`` per fold from ``models_path``.

    For each fold the primary checkpoint name is tried first; if that load
    fails, the ``-v0`` variant of the same checkpoint is tried instead.
    Every model is moved to ``device`` and switched to eval mode.

    Parameters
    ----------
    models_path : str
        Directory containing the ``.ckpt`` files.
    arch : str, optional
        Architecture name embedded in the checkpoint file names.
    n_folds : int
        Number of fold checkpoints to load (default 3).
    device : str
        Target device (default ``'cuda'``).

    Returns
    -------
    list
        The loaded models, each in eval mode.
    """
    models = []
    for i in range(n_folds):
        model = AudioClassifier(arch_name=arch)
        model.to(device)
        # FIX: the original used a bare ``except:`` which also swallows
        # KeyboardInterrupt/SystemExit; catch Exception so those propagate.
        try:
            model.load_from_checkpoint(os.path.join(models_path, f'ZINDI-GIZ-NLP-AGRI-KEYWORDS-{arch}-{i}-based.ckpt'))
        except Exception:
            model.load_from_checkpoint(os.path.join(models_path, f'ZINDI-GIZ-NLP-AGRI-KEYWORDS-{arch}-{i}-based-v0.ckpt'))
        model.eval()
        models.append(model)

    return models
コード例 #4
0
def create_models():
    """Instantiate the four backbone networks used in the ensemble, in a fixed order."""
    factories = (create_dense201, create_dense161, create_res101, create_res152)
    return [factory() for factory in factories]
コード例 #5
0
ファイル: Utilities.py プロジェクト: nclgbd/PyTorch-Utilities
def reload_models(model: nn.Module,
                  model_dir: str,
                  folder_name: str,
                  device="cuda",
                  debug=False) -> list:
    '''Reload every saved checkpoint under a directory into its own model copy.

    Walks ``model_dir/folder_name`` and, for each file found, loads its
    ``'model_state_dict'`` into an independent deep copy of ``model``.

    Parameters
    ----------
    `model` : `nn.Module`\n
        Template model whose architecture matches the saved weights.
    `model_dir` : `str`\n
        Path to the directory.
    `folder_name` : `str`\n
        Name of the folder inside ``model_dir`` to walk.
    `device` : `str`, `optional`\n
        Device to move each model to, by default "cuda".
    `debug` : `bool`, `optional`\n
        If True, print the name of each file as it is read.

    Returns
    -------
    `models` : `list`\n
        List of all the saved models in evaluation mode.

    '''
    import copy  # local import: keeps the fix self-contained

    models = []

    print('Reading in models...')
    path = os.path.join(model_dir, folder_name)
    for subdir, _dirs, files in os.walk(path):
        if not files:
            continue

        for f in files:
            if debug:
                print(f'Reading {f}')

            # BUG FIX: the original appended the *same* ``model`` object for
            # every file, so all list entries aliased the last-loaded weights.
            # Load each checkpoint into its own deep copy instead.
            loaded = copy.deepcopy(model).to(device)
            weights = os.path.join(subdir, f)
            loaded.load_state_dict(
                state_dict=torch.load(weights)['model_state_dict'])
            loaded.eval()
            models.append(loaded)

    return models
コード例 #6
0
ファイル: Utilities.py プロジェクト: nclgbd/PyTorch-Utilities
def reload_models(model, model_dir, folder_name, device="cuda"):
    """Reload every checkpoint under ``model_dir/folder_name`` into its own model copy.

    Parameters:
        model: template ``nn.Module`` matching the saved weights.
        model_dir: base directory containing ``folder_name``.
        folder_name: sub-folder to walk for checkpoint files.
        device: device each loaded model is moved to (default "cuda").

    Returns:
        list of independently-loaded models, one per checkpoint file.
    """
    import copy  # local import: keeps the fix self-contained

    models = []

    print('Reading in models...')
    path = os.path.join(model_dir, folder_name)
    for subdir, _dirs, files in os.walk(path):
        if not files:
            continue

        for f in files:
            print(f'Reading {f}')
            # BUG FIX: the original re-used one ``model`` object, so every list
            # entry aliased the last-loaded weights. Deep-copy per checkpoint.
            loaded = copy.deepcopy(model).to(device)
            loaded.load_state_dict(state_dict=torch.load(subdir + '/' + f)['model_state_dict'])
            models.append(loaded)

    return models
コード例 #7
0
def load_models(model_dirs):
    """
    Load models from all the provided directories.
    Any file with a .pth extension is considered a model file.
    """
    # PERF: cuda availability cannot change mid-call — compute the
    # map_location once instead of querying torch.cuda in the inner loop.
    target_device = None if torch.cuda.is_available() else 'cpu'
    models = []
    for directory in model_dirs:
        for filename in os.listdir(directory):
            if filename.endswith('.pth'):
                model_path = os.path.join(directory, filename)
                model = get_model()
                model.load_state_dict(
                    torch.load(model_path, map_location=target_device))
                models.append(model)
    return models
コード例 #8
0
def get_models(cfg):
    """Build the configured PyTorch models, swap their classifier heads, and return them.

    Names prefixed ``/timm/`` are created through ``timm``; names prefixed
    ``/torch/`` are resolved from the module-level ``models`` import
    (presumably ``torchvision.models`` — TODO confirm against the file header).
    Each model's last layer is replaced by a fresh ``nn.Linear`` with
    ``cfg.NUMCLASSES`` outputs, then the model is moved to ``cfg.device``
    in eval mode.
    """
    # FIX: the local result list was named ``models``, shadowing the
    # torchvision ``models`` module that getattr() below relies on.
    built = []
    for name in cfg.modelnames:
        if name.startswith("/timm/"):
            model = timm.create_model(name[6:], pretrained=False)
        elif name.startswith("/torch/"):
            model = getattr(models, name[7:])(pretrained=False)
        else:
            # Previously an unknown prefix crashed later with UnboundLocalError;
            # fail explicitly instead.
            raise ValueError("Unsupported model name: {}".format(name))

        lastlayer = list(model._modules)[-1]
        head = getattr(model, lastlayer)
        # FIX: torch.nn.modules.module.ModuleAttributeError no longer exists in
        # modern torch (it was an AttributeError subclass) — catch AttributeError.
        try:
            in_features = head.in_features
        except AttributeError:
            # Head is a container (e.g. nn.Sequential); use its linear component.
            in_features = head[1].in_features
        # (Stray editor residue '/home/toefl/K/cassava/mendeley' removed from this line.)
        setattr(model, lastlayer, nn.Linear(in_features=in_features,
                                            out_features=cfg.NUMCLASSES, bias=True))
        model = model.to(cfg.device).eval()
        built.append(model)
    return built
コード例 #9
0
def plot_image_with_models_benchmark_on_special_gpu_between_envs(gpu, phase, precision, big_data_frame):
    """Save a grouped bar chart of per-model benchmark times on one GPU across envs.

    Parameters:
        gpu: GPU identifier to filter ``big_data_frame`` on.
        phase: benchmark phase to filter on.
        precision: numeric precision to filter on.
        big_data_frame: DataFrame with ``gpus``/``phases``/``precisions``/
            ``envs``/``models``/``time`` columns.
    """
    df_envs_models_time = big_data_frame[(big_data_frame.gpus == gpu) & (
        big_data_frame.phases == phase) & (big_data_frame.precisions == precision)]
    df_envs_models_time = df_envs_models_time.sort_values(['envs', 'models'])

    models = []
    envs = list(set(df_envs_models_time['envs'].tolist()))

    # One time series per env; model order follows the sorted frame.
    envs_time_dict = {}
    for _, rows in df_envs_models_time.iterrows():
        envs_time_dict.setdefault(rows.envs, []).append(rows.time)
        if rows.models not in models:
            models.append(rows.models)

    plotdata = pd.DataFrame(envs_time_dict, index=models)
    plotdata = plotdata.sort_index(axis=1)
    plotdata = plotdata.sort_index(axis=0)

    plotdata.plot(figsize=(30, 13), kind="bar", rot=-15, alpha=0.4)

    plt.xlabel("Models", fontsize=14)
    plt.ylabel("Time", fontsize=14)

    plt.title('{} {} models with {} precision'.format(
        gpu, phase, precision), fontsize=16)

    # NOTE: 'bewteen' typo kept so already-generated output directories stay valid.
    benchmark_images_save_dir = ENVS_BENCHMARK_IMAGE_SAVE_DIR + '/' + \
        '{}_benchmark_bewteen_{}'.format(gpu, "_".join(sorted(envs)))
    # FIX: exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(benchmark_images_save_dir, exist_ok=True)

    plt_image_name = '{} {}_models_with_{}_precision_between_{}'.format(
        gpu, phase, precision, "_".join(sorted(envs)))
    save_path = os.path.join(benchmark_images_save_dir, plt_image_name)
    plt.savefig(save_path)
コード例 #10
0
def plot_image_for_compare_model_benchmark_on_multiple_gpus(env, phase, precision, big_data_frame):
    """Save a grouped bar chart comparing per-model benchmark times across GPUs in one env.

    Parameters:
        env: environment identifier to filter ``big_data_frame`` on.
        phase: benchmark phase to filter on.
        precision: numeric precision to filter on.
        big_data_frame: DataFrame with ``gpus``/``phases``/``precisions``/
            ``envs``/``models``/``time`` columns.
    """
    df_gpus_models_time = big_data_frame[(big_data_frame.envs == env) & (
        big_data_frame.phases == phase) & (big_data_frame.precisions == precision)]
    df_gpus_models_time = df_gpus_models_time.sort_values(['gpus', 'models'])

    models = []
    gpus = list(set(df_gpus_models_time['gpus'].tolist()))

    # One time series per GPU; model order follows the sorted frame.
    gpus_time_dict = {}
    for _, rows in df_gpus_models_time.iterrows():
        gpus_time_dict.setdefault(rows.gpus, []).append(rows.time)
        if rows.models not in models:
            models.append(rows.models)

    plotdata = pd.DataFrame(gpus_time_dict, index=models)
    plotdata = plotdata.sort_index(axis=1)
    plotdata = plotdata.sort_index(axis=0)

    plotdata.plot(figsize=(30, 13), kind="bar", rot=-15, alpha=0.4)
    plt.xlabel("Models", fontsize=14)
    plt.ylabel("Time", fontsize=14)
    plt.title('{} models with {} precision on multiples gpus'.format(
        phase, precision), fontsize=16)

    gpu_benchmark_images_save_dir = GPUS_BENCHMARK_IMAGE_SAVE_DIR + \
        '/' + "{}_in_{}".format("_".join(sorted(gpus)), env)
    # FIX: exist_ok avoids the check-then-create race of the original
    # os.path.exists() guard.
    os.makedirs(gpu_benchmark_images_save_dir, exist_ok=True)

    plt_image_name = '{}_models_with_{}_precision_on_gpus_{}'.format(
        phase, precision, "_".join(sorted(gpus)))
    save_path = os.path.join(gpu_benchmark_images_save_dir, plt_image_name)
    plt.savefig(save_path)
コード例 #11
0
    def predict(self, path, n_models):
        """
        Use models from saved snapshots for prediction.

        Parameters:
            path - path to directory containing checkpoint files
            n_models - number of models to use for prediction
                       (top n_models will be used,
                       models sorted based on validation accuracy)

        """
        # Gather (state_dict, validation accuracy) pairs from every checkpoint.
        snapshots = []
        for name in os.listdir(path):
            state = self.load_checkpoint(f'{path}/{name}')
            snapshots.append((state['model'], state['valid_acc']))

        # Keep only the n_models most accurate snapshots.
        snapshots.sort(key=lambda pair: pair[1], reverse=True)
        snapshots = snapshots[:n_models]

        corrects = 0

        # Majority-vote each test batch across the selected snapshots.
        for test_input, test_label in self.test_dl:
            votes = []
            for weights, _acc in snapshots:
                self.model.load_state_dict(weights)
                self.model.to(self.device)
                votes.append((self.test_batch(test_input)))
            stacked = torch.tensor([v.cpu().numpy() for v in votes])
            final_votes = torch.mode(stacked, dim=0)[0]
            corrects += torch.sum(final_votes == test_label)

        return corrects.item() / len(self.test_dl.dataset)
コード例 #12
0
ファイル: NewDiff.py プロジェクト: wanglikuan/ErrCmpn
            # NOTE(review): fragment — the enclosing function header and the start
            # of this if/elif chain are outside this excerpt; the dangling `else:`
            # below pairs with a branch not visible here.
            model = models.resnet34(pretrained=False)

            # Standard ImageNet normalization; the same transform is reused for test.
            train_transform = transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
            ])
            test_transform = train_transform
            # NOTE(review): torchvision's ImageFolder takes no `train=`/`download=`
            # kwargs — this looks copied from a CIFAR/MNIST dataset call; verify.
            train_dataset = datasets.ImageFolder(args.data_dir, train=True, download=False,
                                             transform=train_transform)
            test_dataset = datasets.ImageFolder(args.data_dir, train=False, download=False,
                                            transform=test_transform)
        else:
            print('Model must be {} or {}!'.format('MnistCNN', 'AlexNet'))
            sys.exit(-1)
        models.append(model)
    # Split the global batch size evenly across workers.
    train_bsz = args.train_bsz
    train_bsz /= len(workers)
    train_bsz = int(train_bsz)

    # Partition the training set and build one loader per worker.
    train_data = partition_dataset(train_dataset, workers)
    train_data_list = []
    for i in workers:
        train_data_sub = select_dataset(workers, i, train_data, batch_size=train_bsz)
        train_data_list.append(train_data_sub)

    test_bsz = 400
    # Evaluate with all of the test data.
    test_data = DataLoader(test_dataset, batch_size=test_bsz, shuffle = False)

    iterations_epoch = int(len(train_dataset) / args.train_bsz)
コード例 #13
0
def main():
    """Run the DI-2-FGSM ensemble attack over the image dataset.

    Loads six face-recognition models, computes a mean feature face, then for
    each batch iteratively perturbs the images so their ensemble features move
    away from the originals (loss1) and toward the mean face (loss2), and
    writes the adversarial images as JPEGs into ``sample_dir``.

    Relies on module-level helpers/globals defined elsewhere in the file:
    ``device_0``/``device_1``, ``gkern``, ``GaussianBlur``, ``TVLoss``,
    ``clip_by_tensor``, ``cal_mean_face_by_extend_dataset`` and the model
    classes.  (This revision removes the large commented-out experiments, the
    no-op bare expressions and dead self-assignments of the original; the
    executed statement sequence is unchanged.)
    """
    sample_dir = './test_DI-2-FGSM-3/'
    os.makedirs(sample_dir, exist_ok=True)

    # ---- load the six ensemble members (eval mode, split across two GPUs) ----
    InceptionResnet_model_1 = InceptionResnetV1(
        pretrained='vggface2').eval().to(device_0)
    print('load InceptionResnet-vggface2.pt successfully')

    IR_50_model_1 = IR_50([112, 112])
    IR_50_model_1.load_state_dict(
        torch.load(
            '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/Face_recognition/irse/model/backbone_ir50_asia.pth'
        ))
    IR_50_model_1.eval().to(device_0)
    print('load IR_50 successfully')

    Insightface_iresnet34 = insightface.iresnet34(pretrained=True)
    Insightface_iresnet34.eval().to(device_1)
    print('load Insightface_iresnet34 successfully')

    Insightface_iresnet50 = insightface.iresnet50(pretrained=True)
    Insightface_iresnet50.eval().to(device_1)
    print('load Insightface_iresnet50 successfully')

    Insightface_iresnet100 = insightface.iresnet100(pretrained=True)
    Insightface_iresnet100.eval().to(device_1)
    print('load Insightface_iresnet100 successfully')

    arc_face_ir_se_50 = Arcface()
    arc_face_ir_se_50.eval()
    arc_face_ir_se_50.to(device_0)

    models = [
        InceptionResnet_model_1,
        IR_50_model_1,
        Insightface_iresnet34,
        Insightface_iresnet50,
        Insightface_iresnet100,
        arc_face_ir_se_50,
    ]

    criterion = nn.MSELoss()

    # Collect all images to attack.
    paths = []
    picpath = '/notebooks/Workspace/tmp/pycharm_project_314/TianChi/images'
    for root, dirs, files in os.walk(picpath):
        for f in files:
            paths.append(os.path.join(root, f))
    random.shuffle(paths)

    # Attack hyper-parameters.
    eps = 1          # per-step pixel budget, in 1/255 units
    steps = 50       # iterations per batch
    momentum = 0.3   # gradient momentum
    alpha = 0.35     # weight of "move away from original features" loss
    beta = 0.3       # weight of "move toward mean face" loss

    print('cal mean feature face #########################')
    mean_face = cal_mean_face_by_extend_dataset(models)
    print('finish cal mean face...')

    print('######attack...##################')
    from mydataset import CustomDataset
    custom_dataset = CustomDataset()
    train_loader = torch.utils.data.DataLoader(dataset=custom_dataset,
                                               batch_size=6,
                                               shuffle=True)
    count = 0
    progressRate = 0.0
    # FIX: the original reused loop variable ``i`` at three nesting levels;
    # distinct names avoid accidental clobbering.
    for _, (x, path) in enumerate(train_loader):
        start = time.time()
        print('processing ' + str(progressRate) + '  ===============>')
        in_tensor = x
        origin_variable = in_tensor.detach()
        in_variable = in_tensor.detach()

        # L-infinity box: stay within +/-0.1 of the clean image.
        in_variable_max = in_variable + 0.1
        in_variable_min = in_variable - 0.1

        # Random start inside the box, clamped to the valid pixel range.
        perturbation = torch.Tensor(x.size(0), 3, 112,
                                    112).uniform_(-0.05, 0.05)
        in_variable += perturbation
        in_variable.data.clamp_(-1.0, 1.0)
        in_variable.requires_grad = True
        g_noise = torch.zeros_like(in_variable)

        # Ensemble features of the clean images (targets for loss1).
        origin_InceptionResnet_model_1 = InceptionResnet_model_1(
            origin_variable.to(device_0)).cpu()
        origin_IR_50_model_1 = IR_50_model_1(
            origin_variable.to(device_0)).cpu()
        origin_Insightface_iresent34 = Insightface_iresnet34(
            origin_variable.to(device_1)).cpu()
        origin_Insightface_iresent50 = Insightface_iresnet50(
            origin_variable.to(device_1)).cpu()
        origin_Insightface_iresent100 = Insightface_iresnet100(
            origin_variable.to(device_1)).cpu()
        origin_arcface = arc_face_ir_se_50(origin_variable.to(device_0)).cpu()

        origin_average_out = (origin_InceptionResnet_model_1 +
                              origin_IR_50_model_1 +
                              origin_Insightface_iresent34 +
                              origin_Insightface_iresent50 +
                              origin_Insightface_iresent100 +
                              origin_arcface) / 6.

        # Iteratively accumulate the signed, smoothed gradient.
        for step in range(steps):
            print('step: ' + str(step))
            mediate_InceptionResnet_model_1 = InceptionResnet_model_1(
                in_variable.to(device_0)).cpu()
            mediate_IR_50_model_1 = IR_50_model_1(
                in_variable.to(device_0)).cpu()
            mediate_Insightface_iresent34 = Insightface_iresnet34(
                in_variable.to(device_1)).cpu()
            mediate_Insightface_iresent50 = Insightface_iresnet50(
                in_variable.to(device_1)).cpu()
            mediate_Insightface_iresent100 = Insightface_iresnet100(
                in_variable.to(device_1)).cpu()
            mediate_arcface = arc_face_ir_se_50(in_variable.to(device_0)).cpu()

            mediate_average_out = (mediate_InceptionResnet_model_1 +
                                   mediate_IR_50_model_1 +
                                   mediate_Insightface_iresent34 +
                                   mediate_Insightface_iresent50 +
                                   mediate_Insightface_iresent100 +
                                   mediate_arcface) / 6.

            # loss1 pushes features away from the clean image; loss2 pulls
            # them toward the mean face.
            loss1 = criterion(mediate_average_out, origin_average_out)
            loss2 = criterion(mediate_average_out, mean_face)
            loss = alpha * loss1 - beta * loss2

            loss.backward(retain_graph=True)

            # Momentum-accumulated, L1-normalised gradient.
            g_noise = momentum * g_noise + (in_variable.grad /
                                            in_variable.grad.data.norm(1))
            g_noise = g_noise / g_noise.data.norm(1)

            # Blend a Gaussian-blurred copy with a TV-regularised copy.
            # (The original's bare `gaussian_blur1` / `addition` expression
            # statements were no-ops and have been removed.)
            kernel = gkern(3, 2).astype(np.float32)
            g1 = GaussianBlur(kernel)(g_noise)
            g2 = TVLoss()(g_noise)
            g_noise = 0.25 * g1 + 0.75 * g2

            # Signed-gradient step, then project back into the L-inf box.
            in_variable.data = in_variable.data + (
                (eps / 255.) * torch.sign(g_noise)
            )
            in_variable.data = clip_by_tensor(in_variable.data,
                                              in_variable_min.data,
                                              in_variable_max.data)

            in_variable.grad.data.zero_()

        # Deprocess and save every adversarial image in the batch.
        for idx in range(len(in_variable.data.cpu().numpy())):
            adv = in_variable.data.cpu().numpy()[idx]  # (3, 112, 112)

            adv = adv * 128.0 + 127.0
            adv = adv.swapaxes(0, 1).swapaxes(1, 2)  # CHW -> HWC
            adv = adv[..., ::-1]                     # RGB -> BGR for cv2
            adv = np.clip(adv, 0, 255).astype(np.uint8)

            advimg = sample_dir + path[idx].split('/')[-1].split(
                '.')[-2] + '.jpg'
            print(advimg)
            cv2.imwrite(advimg, adv)
            print("save path is " + advimg)
            print('cost time is %.2f s ' % (time.time() - start))

        count += 6
        progressRate = count / 712.
コード例 #14
0
def run(size):
    """Serially simulate `size` federated-learning workers for `args.cr`
    communication rounds.

    Each worker (rank) owns its own model, anchor model, optimizer, data
    iterator and metric meters; after every worker finishes its local steps
    the models are aggregated with one of the all-reduce helpers
    (NormalSGDALLreduce / FedProx_SyncAllreduce / unbalanced_SyncAllreduce).
    Training and test metrics are appended to one CSV file per worker.

    Relies on module-level globals: `args`, `util`, `FedProx`,
    `partition_dataset`, `get_next_trainloader`, `evaluate` and the
    all-reduce helpers.
    """
    # Per-worker state, indexed by rank.
    models = []
    anchor_models = []
    optimizers = []
    ratios = []
    iters = []
    cps = args.cp
    save_names = []
    loss_Meters = []
    top1_Meters = []
    best_test_accs = []

    # Decide how many local steps (cp) each worker runs per round.
    if args.constant_cp:
        # NOTE(review): if args.cp is an int this produces a scalar and
        # `cps[rank]` below would fail -- presumably args.cp is a list here;
        # verify against the argument parser.
        cps = args.cp * args.size
    elif args.persistent:
        # Hard-coded mix: seven fast workers (5 steps) and three slow (20).
        cps = [5, 5, 5, 5, 5, 5, 5, 20, 20, 20]
    else:
        # Randomly slow down a fraction (args.slowRatio) of the workers.
        local_cps = args.cp * np.ones(size, dtype=int)
        num_slow_nodes = int(size * args.slowRatio)
        np.random.seed(2020)  # fixed seed so the slow-node pattern is reproducible
        random_cps = 5 + np.random.randn(num_slow_nodes) * 2
        for i in range(len(random_cps)):
            random_cps[i] = round(random_cps[i])
        local_cps[:num_slow_nodes] = random_cps
        # local_iterations = local_cps[rank]
        cps = local_cps

    # ---- per-worker setup ----
    for rank in range(args.size):
        # initiate experiments folder
        save_path = 'new_results/'
        folder_name = save_path + args.name
        if rank == 0 and os.path.isdir(folder_name) == False and args.save:
            os.mkdir(folder_name)
        # initiate log files (one CSV per rank, named from the hyperparameters)
        tag = '{}/lr{:.3f}_bs{:d}_cr{:d}_avgcp{:.3f}_e{}_r{}_n{}.csv'
        saveFileName = tag.format(folder_name, args.lr, args.bs, args.cr,
                                  np.mean(args.cp), args.seed, rank, size)
        args.out_fname = saveFileName
        save_names.append(saveFileName)
        with open(args.out_fname, 'w+') as f:
            print('BEGIN-TRAINING\n'
                  'World-Size,{ws}\n'
                  'Batch-Size,{bs}\n'
                  'itr,'
                  'Loss,avg:Loss,Prec@1,avg:Prec@1,val'.format(ws=args.size,
                                                               bs=args.bs),
                  file=f)

        globalCp = args.globalCp
        total_size = args.total_size

        # seed for reproducibility
        torch.manual_seed(args.seed)
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.deterministic = True

        # load datasets (per-rank partition of the full training set)
        train_loader, test_loader, dataRatio, x, y = partition_dataset(
            rank, total_size, 1, args.alpha, args.beta, args)
        ratios.append(dataRatio)
        print(sum([len(i) for i in x]))
        data_iter = iter(train_loader)
        iters.append(data_iter)

        # define neural nets model, criterion, and optimizer
        model = util.select_model(args.model, args)
        anchor_model = util.select_model(args.model, args)

        models.append(model)
        anchor_models.append(anchor_model)

        criterion = nn.CrossEntropyLoss()
        if args.FedProx:
            # FedProx variant: proximal term pulls updates toward the anchor
            optimizer = FedProx.FedProxSGD(model.parameters(),
                                           lr=args.lr,
                                           momentum=0,
                                           nesterov=False,
                                           weight_decay=1e-4)
        else:
            optimizer = optim.SGD(model.parameters(),
                                  lr=args.lr,
                                  momentum=0,
                                  nesterov=False,
                                  weight_decay=1e-4)
        optimizers.append(optimizer)

        batch_idx = 0
        best_test_accuracy = 0
        best_test_accs.append(best_test_accuracy)

        losses = util.Meter(ptag='Loss')
        top1 = util.Meter(ptag='Prec@1')
        loss_Meters.append(losses)
        top1_Meters.append(top1)

        model.train()
        tic = time.time()
        print(dataRatio, len(train_loader), len(test_loader))

    # ---- main training loop: one iteration == one communication round ----
    round_communicated = 0
    while round_communicated < args.cr:
        for rank in range(args.size):
            # restore this worker's state
            model = models[rank]
            anchor_model = anchor_models[rank]
            data_iter = iters[rank]
            optimizer = optimizers[rank]
            losses = loss_Meters[rank]
            top1 = top1_Meters[rank]

            # run this worker's local steps for the round
            for cp in range(cps[rank]):
                try:
                    # NOTE(review): `.next()` is the old (pre-0.4) DataLoader
                    # iterator API; modern PyTorch requires next(data_iter).
                    data, target = data_iter.next()
                except StopIteration:
                    # NOTE(review): `train_loader` here is whatever loader was
                    # bound last (setup loop or a previous worker's
                    # get_next_trainloader call), not necessarily this rank's
                    # own loader -- looks like a latent bug; verify.
                    data_iter = iter(train_loader)
                    data, target = data_iter.next()

                # data loading
                data = data
                target = target

                # forward pass
                output = model(data)
                loss = criterion(output, target)

                # backward pass
                loss.backward()
                if args.FedProx:
                    optimizer.step(anchor_model, args.mu)
                else:
                    optimizer.step()
                optimizer.zero_grad()

                train_acc = util.comp_accuracy(output, target)
                losses.update(loss.item(), data.size(0))
                top1.update(train_acc[0].item(), data.size(0))

                # batch_idx += 1
            # change the worker: fetch a fresh loader/ratio for next round
            train_loader, dataRatio = get_next_trainloader(
                round_communicated, x, y, rank, args)
            data_iter = iter(train_loader)
            iters[rank] = data_iter
            ratios[rank] = dataRatio

        # aggregate the local models once per communication round
        if args.NSGD:
            NormalSGDALLreduce(models, anchor_models, cps, globalCp, ratios)
        elif args.FedProx:
            FedProx_SyncAllreduce(models, ratios, anchor_models)
        else:
            unbalanced_SyncAllreduce(models, ratios)
        round_communicated += 1
        # update_lr(optimizer, round_communicated)

        # log running training metrics every 4 rounds
        if round_communicated % 4 == 0:
            for rank in range(args.size):
                name = save_names[rank]
                losses = loss_Meters[rank]
                top1 = top1_Meters[rank]

                with open(name, '+a') as f:
                    print('{itr},'
                          '{loss.val:.4f},{loss.avg:.4f},'
                          '{top1.val:.3f},{top1.avg:.3f},-1'.format(
                              itr=round_communicated, loss=losses, top1=top1),
                          file=f)

        # evaluate on the test set every 12 rounds
        if round_communicated % 12 == 0:
            for rank in range(args.size):
                name = save_names[rank]
                model = models[rank]
                losses = loss_Meters[rank]
                top1 = top1_Meters[rank]
                name = save_names[rank]

                # NOTE(review): `test_loader` and `criterion` are the ones
                # created for the *last* rank in the setup loop -- confirm
                # all ranks are meant to share the same test set.
                test_acc, global_loss = evaluate(model, test_loader, criterion)

                if test_acc > best_test_accs[rank]:
                    best_test_accs[rank] = test_acc

                print('itr {}, '
                      'rank {}, loss value {:.4f}, '
                      'train accuracy {:.3f}, test accuracy {:.3f}, '
                      'elasped time {:.3f}'.format(round_communicated, rank,
                                                   losses.avg, top1.avg,
                                                   test_acc,
                                                   time.time() - tic))

                with open(name, '+a') as f:
                    print('{itr},{filler},{filler},'
                          '{filler},{loss:.4f},'
                          '{val:.4f}'.format(itr=-1,
                                             filler=-1,
                                             loss=global_loss,
                                             val=test_acc),
                          file=f)

                # reset the meters for the next evaluation window
                losses.reset()
                top1.reset()
                tic = time.time()
                # return

    # record each worker's best test accuracy at the end of training
    for rank in range(args.size):
        name = save_names[rank]
        with open(name, '+a') as f:
            print('{itr} best test accuracy: {val:.4f}'.format(
                itr=-2, val=best_test_accs[rank]),
                  file=f)
コード例 #15
0
def main(args):
    """Train a two-task (face identification + facial expression)
    progressive-network style model built from VGG-F columns.

    Parameters
    ----------
    args : dict with keys 'fer_path', 'faces_path', 'weights_path',
        'fer_lr', 'faces_lr', 'fer_step', 'faces_step', 'fer_gamma',
        'faces_gamma' and 'epochs'.

    Uses module-level names: `emotion_table`, `train_folders`,
    `valid_folders`, `msra`, `fplus_data`, `vggf` and `train_model`.
    """

    #load the dataset
    num_tasks = 2  #how many tasks do we have?
    emotion_folder = args['fer_path']
    images_path = args['faces_path']
    num_classes_emotion = len(emotion_table)

    # Task 0: MSRA-CFW face-identification dataset and loaders.
    msra_cfw_faceid_datasets = {
        x: msra.MSRA_CFW_FaceIDDataset(root_dir=images_path,
                                       mode=x,
                                       validation_folds=4,
                                       test_split=0.0)
        for x in ['train', 'val']
    }
    msra_cfw_dataloaders = {
        x: torch.utils.data.DataLoader(msra_cfw_faceid_datasets[x],
                                       batch_size=4,
                                       shuffle=True,
                                       num_workers=2)
        for x in ['train', 'val']
    }

    # read FER+ dataset (task 1).
    print("Loading data...")

    fer_datasets = {
        x: fplus_data.ferplus_Dataset(base_folder=emotion_folder,
                                      train_folders=train_folders,
                                      valid_folders=valid_folders,
                                      mode=x,
                                      classes=emotion_table)
        for x in ['train', 'val']
    }
    fer_dataloaders = {
        x: torch.utils.data.DataLoader(fer_datasets[x],
                                       batch_size=4,
                                       shuffle=True,
                                       num_workers=2)
        for x in ['train', 'val']
    }

    dataset_sizes_face = {
        x: len(msra_cfw_faceid_datasets[x])
        for x in ['train', 'val']
    }
    class_names_face = msra_cfw_faceid_datasets['train'].classes
    num_classes_face = msra_cfw_faceid_datasets['train'].num_classes
    #
    print('Num batches      : {}'.format(len(msra_cfw_dataloaders['train'])))
    print('Dataset size     : {}'.format(dataset_sizes_face['train']))
    print('Number of classes: {}'.format(num_classes_face))

    dataset_sizes_emotion = {x: len(fer_datasets[x]) for x in ['train', 'val']}

    print('Num batches      : {}'.format(len(fer_dataloaders['train'])))
    print('Dataset size     : {}'.format(dataset_sizes_emotion['train']))
    print('Number of classes: {}'.format(num_classes_emotion))

    use_gpu = torch.cuda.is_available()

    # Parallel lists indexed by task id (0 = faces, 1 = emotions).
    dataloaders = [msra_cfw_dataloaders, fer_dataloaders]
    dataset_sizes = [dataset_sizes_face, dataset_sizes_emotion]
    num_classes = [num_classes_face, num_classes_emotion]

    #load the model: one list of progressive-net columns per task;
    # task `t` gets columns 0..t.
    weights_path = args['weights_path']
    models = []
    for task in range(0, num_tasks):
        # BUG FIX: this list must be re-created for every task.  Previously a
        # single list was shared across iterations, so models[0] and
        # models[1] aliased the same growing list and the last column of
        # task 1 was never moved to the GPU (and the wrong column was
        # optimized below).
        models_for_task = []
        for col in range(0, task + 1):
            model_vggf = vggf.imagenet_matconvnet_vgg_f_dag(col, weights_path)
            # replace the final classifier with a head sized for this task
            num_ftrs = model_vggf.fc8.in_features  # fc7 or fc8 ????
            model_vggf.fc8 = nn.Linear(num_ftrs, num_classes[task])
            models_for_task.append(model_vggf)
            print(model_vggf)
        models.append(models_for_task)

    # Optimize the newest column of each task: col 1 for FER, col 0 for faces.
    optimizer_fer = optim.SGD(models[1][1].parameters(),
                              lr=args['fer_lr'],
                              momentum=0.9)
    optimizer_faces = optim.SGD(models[0][0].parameters(),
                                lr=args['faces_lr'],
                                momentum=0.9)
    optimizers_vggf = [optimizer_faces, optimizer_fer]

    scheduler_faces = lr_scheduler.StepLR(optimizer_faces,
                                          step_size=args['faces_step'],
                                          gamma=args['faces_gamma'])
    scheduler_fer = lr_scheduler.StepLR(optimizer_fer,
                                        step_size=args['fer_step'],
                                        gamma=args['fer_gamma'])
    schedulers_vggf = [scheduler_faces, scheduler_fer]

    # Move every column of every task to the GPU when available.
    if use_gpu:
        for task_id in range(0, num_tasks):
            for col in range(0, task_id + 1):
                models[task_id][col].cuda()

    criterion = nn.CrossEntropyLoss()

    models = train_model(num_tasks,
                         models,
                         dataloaders,
                         dataset_sizes,
                         criterion,
                         optimizers_vggf,
                         schedulers_vggf,
                         epochs=args['epochs'])
コード例 #16
0
    return cams


# Paths (as (filename, label) tuples) of the gallery and query image sets.
gallery_path = image_datasets['gallery'].imgs
query_path = image_datasets['query'].imgs

######################################################################

# Build the ensemble: one trained re-ID network per comma-separated name.
names = opt.names.split(',')
models = nn.ModuleList()

for name in names:
    model_tmp, _, epoch = load_network(name, opt)
    # Drop the final classifier head so the forward pass yields embeddings.
    model_tmp.classifier.classifier = nn.Sequential()
    model_tmp = torch.nn.DataParallel(model_tmp)
    models.append(model_tmp.cuda().eval())

# Extract features and cache them in a .mat file (skip if it already exists).
snapshot_feature_mat = './feature/submit_result_%s.mat' % opt.names
print('Feature Output Path: %s' % snapshot_feature_mat)
if not os.path.isfile(snapshot_feature_mat):
    with torch.no_grad():
        gallery_feature, query_feature = torch.FloatTensor(
        ), torch.FloatTensor()
        for model in models:
            # Sum the features of the full and the cropped query images.
            q_f = extract_feature(model, dataloaders['query'])
            q_f_crop = extract_feature(model, cropped_dataloaders['query'])
            q_f = q_f + q_f_crop
            # L2-normalize each row, scaled by 1/sqrt(#models) so the
            # concatenated ensemble feature keeps unit norm.
            qnorm = torch.norm(q_f, p=2, dim=1, keepdim=True)
            q_f = q_f.div(qnorm.expand_as(q_f)) / np.sqrt(len(names))
コード例 #17
0
def kaggle_bagging_test(test_data_dir,
                        model_name,
                        cpp1=None,
                        cpp2=None,
                        cpp3=None,
                        cpp4=None,
                        csv_path='./result.csv',
                        batch_size=BATCH_SIZE):
    """Run an ensemble ("bagging") of up to four checkpoints over a test
    directory and write per-image averaged softmax probabilities to a CSV.

    Each checkpoint is a JSON file containing a 'model_path'; model i is
    loaded onto device 'cuda:i' and the models' class probabilities are
    averaged per image.

    Parameters
    ----------
    test_data_dir : str
        Directory containing the test images.
    model_name : str
        Architecture name understood by make_model / make_model_setting.
    cpp1..cpp4 : str or None
        Optional checkpoint JSON paths; at least one must be given.
    csv_path : str
        Output CSV path; each row is `filename, prob_0, prob_1, ...`.
    batch_size : int
        Test DataLoader batch size.
    """
    models = []
    devices = []

    image_size = make_model_setting(model_name=model_name)
    print('===== MODEL INFO =====')
    print('image size = {}'.format(image_size))
    print('======================')

    checkpoint_paths = [cpp1, cpp2, cpp3, cpp4]
    if any(checkpoint_paths) is False:
        print('[ERROR]all checkpoint files are empty')
        return

    # Keep only the checkpoints that were actually supplied.
    checkpoint_paths = [cpp for cpp in checkpoint_paths if cpp is not None]
    for i, checkpoint_path in enumerate(checkpoint_paths):
        model_path = None
        print('[INFO]read checkpoint: {}'.format(checkpoint_path))
        with open(checkpoint_path, 'r') as load_f:
            config = json.load(load_f)
            model_path = config['model_path']
            print('      read model: {}'.format(model_path))
        # select device id: model i lives on GPU i
        device_id = 'cuda:{}'.format(i)
        device = torch.device(device_id)
        model = make_model(model_path,
                           on_client=False,
                           model_name=model_name,
                           map_location=device)
        model.eval()
        models.append(model)
        devices.append(device)
    print('[INFO]models generated')

    csv_path = os.path.abspath(csv_path)
    csv_file, csv_write = generate_csv_handle(csv_path)
    print('[INFO]create csvfile ok')

    test_data_dir = os.path.abspath(test_data_dir)
    test_transorms = default_transorms(image_size=image_size)
    test_gen = TestDataset(test_data_dir, test_transorms=test_transorms)
    test_dataiter = DataLoader(test_gen, batch_size=batch_size, shuffle=False)
    print('[INFO]generate datasets ok')

    preds = []
    row = []
    with torch.no_grad():
        for i, data in enumerate(test_dataiter):
            if i % 1000 == 0:
                print('running on {}th batch'.format(i))
            image = data[0]
            filenames = data[1]

            preds.clear()

            for j, model in enumerate(models):
                device = devices[j]
                image = image.to(device)
                pred = model(image)
                # BUG FIX: softmax over the class dimension (dim=1).  The
                # previous dim=0 normalized across the batch, producing
                # meaningless per-image "probabilities".
                pred = torch.nn.functional.softmax(pred, dim=1)
                # BUG FIX: move each model's prediction to the CPU so that
                # torch.stack below does not mix tensors living on
                # different GPUs (which raises a device-mismatch error).
                preds.append(pred.cpu())

            # Average the ensemble's class probabilities per image.
            preds_tensor = torch.stack(preds, dim=0)
            preds_mean = preds_tensor.mean(dim=0)
            preds_mean = preds_mean.numpy()
            #pred = preds_mean.max(1, keepdim=True)[1]

            # One CSV row per image: filename followed by the class probs.
            row.clear()
            for (filename, probs) in zip(filenames, preds_mean):
                row.append(filename)
                row.extend(probs)
                csv_write.writerow(row)
                row.clear()

    csv_file.close()
    print('save csv to {}'.format(csv_path))
コード例 #18
0
def bagging_test(data_dir,
                 model_name,
                 cpp1=None,
                 cpp2=None,
                 cpp3=None,
                 cpp4=None,
                 batch_size=BATCH_SIZE,
                 incorrects_log='./incorrects.log',
                 valid_drivers=None):
    """Validate an ensemble of up to four checkpoints on a driver dataset
    and log every misclassified sample.

    Each checkpoint is a JSON file containing a 'model_path'; model i is
    loaded onto device 'cuda:i'.  Validation itself is delegated to
    `bagging_valid`; misclassifications are written to `incorrects_log`
    as `filename:label:prediction` lines.

    Parameters
    ----------
    data_dir : str
        Root directory of the validation images.
    model_name : str
        Architecture name understood by make_model / make_model_setting.
    cpp1..cpp4 : str or None
        Optional checkpoint JSON paths; at least one must be given.
    batch_size : int
        Unused here directly; kept for interface compatibility.
    incorrects_log : str
        Path of the misclassification log file.
    valid_drivers : optional
        Driver-id subset forwarded to DriverDataset.
    """
    models = []
    devices = []

    image_size = make_model_setting(model_name=model_name)
    print('===== MODEL INFO =====')
    print('image size = {}'.format(image_size))
    print('======================')

    checkpoint_paths = [cpp1, cpp2, cpp3, cpp4]
    if any(checkpoint_paths) is False:
        print('[ERROR]all checkpoint files are empty')
        return

    # Keep only the checkpoints that were actually supplied.
    checkpoint_paths = [cpp for cpp in checkpoint_paths if cpp is not None]
    for i, checkpoint_path in enumerate(checkpoint_paths):
        model_path = None
        print('[INFO]read checkpoint: {}'.format(checkpoint_path))
        with open(checkpoint_path, 'r') as load_f:
            config = json.load(load_f)
            model_path = config['model_path']
            print('      read model: {}'.format(model_path))
        device_id = 'cuda:{}'.format(i)
        device = torch.device(device_id)
        model = make_model(model_path,
                           on_client=False,
                           model_name=model_name,
                           map_location=device)
        model = model.to(device)
        model.eval()
        models.append(model)
        devices.append(device)

    print('[INFO]models generated')

    valid_transorms = default_transorms(image_size=image_size)
    #valid_dataset = ImageFolder(valid_data_dir, transform=valid_transorms)
    valid_dataset = DriverDataset(data_dir, valid_transorms, valid_drivers)
    print('[INFO]generate datasets ok')

    # ROBUSTNESS FIX: open the incorrects log with a context manager so the
    # handle is closed even if validation raises (the previous bare
    # open()/close() pair leaked the file on error).
    with open(incorrects_log, 'w') as inc_f:

        def incorrect_record(info=None, records=None):
            # Write an optional info line, then one 'file:label:pred' line
            # per misclassified record.
            if info is not None:
                inc_f.write(info)
                inc_f.write('\n')

            if records is not None:
                for record in records:
                    read_index = record[0]
                    pred = record[1]
                    # Map the loader's read index back to the source file.
                    data_index = valid_dataset.used_images[read_index]
                    filename = valid_dataset.image_paths[data_index]
                    label = valid_dataset.labels[filename]
                    text = '{}:{}:{}\n'.format(filename, label, pred)
                    inc_f.write(text)

        criterion = torch.nn.CrossEntropyLoss()
        loss, correct, _, incorrects = bagging_valid(models, criterion,
                                                     valid_dataset, devices)
        print('Valid set: Average of loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
              format(loss, correct, len(valid_dataset),
                     100. * correct / len(valid_dataset)))
        incorrect_record(records=incorrects)
コード例 #19
0
def main():
    """Generate adversarial examples with a multi-GPU ensemble attack
    (momentum iterative FGSM with input diversity, Gaussian blur and a
    CW-style loss) and save the resulting images.

    Uses module-level globals: `FLAGS`, `start_time`, `timm`, `functional`
    and the helpers input_diversity / clip_by_tensor / CW_loss /
    load_images / save_images.
    """
    # IDIOM FIX: hoisted out of the per-iteration attack loop -- importing
    # inside the innermost loop re-ran the import machinery every step.
    import threading
    from queue import Queue

    eps = FLAGS.max_epsilon / 255.0
    num_iter = FLAGS.num_iter
    alpha = eps / num_iter  # per-step attack magnitude
    momentum = FLAGS.momentum
    num_classes = 1000
    batch_shape = [FLAGS.batch_size, 3, FLAGS.image_height, FLAGS.image_width]

    # model selection: one ensemble member per GPU, gradients gathered on cuda:0
    main_device = 'cuda:0'
    model_names = [
        'tf_efficientnet_b6_ns', 'resnest269e', 'vit_large_patch16_384',
        'tf_efficientnet_b6_ap'
    ]
    models = []
    for i, model_name in enumerate(model_names):
        model = timm.create_model(model_name,
                                  num_classes=1000,
                                  pretrained=True)
        model = model.to('cuda:{}'.format(i))
        model.eval()
        models.append(model)

    def forward_map_func(args_dict, q):
        # Run one ensemble member's forward pass on its own GPU and put the
        # logits (moved back to main_device) onto queue `q`.
        # with global variables models and main_device
        i = args_dict['id']
        x_adv = args_dict['tensor']
        diverse_input = input_diversity(x_adv)
        image_resize = models[i].default_cfg['input_size'][1:]
        # per-model input normalization constants from the timm config
        mean = torch.tensor(models[i].default_cfg['mean']).view(
            3, 1, 1).to(main_device)
        std = torch.tensor(models[i].default_cfg['std']).view(
            3, 1, 1).to(main_device)
        resized_tensor = F.interpolate(diverse_input,
                                       size=image_resize,
                                       mode='bicubic')
        gaussian_tensor = functional.gaussian_blur(resized_tensor,
                                                   kernel_size=(5, 5),
                                                   sigma=(0.9, 0.9))
        normalized_tensor = (gaussian_tensor - mean) / std
        output = models[i](normalized_tensor.to(
            'cuda:{}'.format(i))).to(main_device)
        q.put(output)

    print(time.time() - start_time)

    for filenames, images, labels in load_images(
            os.path.join(FLAGS.input_dir, 'images'), batch_shape):
        batch_start = time.time()
        input_image = torch.from_numpy(images).float().to(main_device)
        input_tensor = input_image.to(main_device)

        print(labels)
        target = torch.from_numpy(labels).long().to(main_device)

        grad = torch.zeros_like(input_tensor)
        clip_tensor_one = torch.ones_like(input_tensor)
        clip_tensor_zero = torch.zeros_like(input_tensor)
        # Valid perturbation box: stay within [x - eps, x + eps] and [0, 1].
        x_max = clip_by_tensor(input_tensor + eps, clip_tensor_zero,
                               clip_tensor_one)
        x_min = clip_by_tensor(input_tensor - eps, clip_tensor_zero,
                               clip_tensor_one)
        x_adv = input_tensor
        for i in range(FLAGS.num_iter):
            x_adv = x_adv.clone().detach().requires_grad_(True)

            # Fan the forward pass out: one thread per model/GPU.
            q = Queue()
            threads = []
            for d in range(len(model_names)):
                t = threading.Thread(target=forward_map_func,
                                     args=({
                                         'id': d,
                                         'tensor': x_adv
                                     }, q))
                threads.append(t)
                t.start()

            [thread.join() for thread in threads]
            # Average the ensemble logits on the main device.
            logits = torch.zeros(
                (FLAGS.batch_size, num_classes)).to(main_device)
            for _ in threads:
                logits += q.get().to(main_device) / len(models)

            # loss = F.cross_entropy(logits, target)
            loss = CW_loss(logits, target)
            loss.backward()
            noise = x_adv.grad
            # L1-normalize the gradient, then accumulate with momentum (MI-FGSM).
            noise = noise / torch.mean(
                torch.abs(noise), dim=[1, 2, 3], keepdim=True)
            noise = momentum * grad + noise
            x_adv = x_adv + alpha * torch.sign(noise)
            x_adv = clip_by_tensor(x_adv, x_min, x_max)
            grad = noise
        save_images(
            x_adv.permute(0, 2, 3, 1).detach().cpu().numpy(), filenames,
            FLAGS.output_dir)
        print(time.time() - batch_start)
コード例 #20
0
ファイル: vis.py プロジェクト: jysh1214/yolov2-visualization
def create_network(blocks):
    """Build an nn.ModuleList from parsed Darknet (yolov2) config blocks.

    `blocks` is a list of dicts, one per [section] of a .cfg file.  One
    module is appended per section (placeholder EmptyModule for
    route/shortcut, which are resolved at forward time), while
    `prev_filters`/`out_filters` track the running channel count so each
    layer knows its input width.  Returns the ModuleList.
    """
    models = nn.ModuleList()

    prev_filters = 3  # channels entering the next layer (RGB default)
    out_filters = []  # output channel count of every created layer, by index
    conv_id = 0
    for block in blocks:
        if block['type'] == 'net':
            # Global network header: only records the input channel count.
            prev_filters = int(block['channels'])
            continue
        elif block['type'] == 'convolutional':
            conv_id = conv_id + 1
            batch_normalize = int(block['batch_normalize'])
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            stride = int(block['stride'])
            is_pad = int(block['pad'])
            # Darknet "pad=1" means SAME-style padding of (k-1)//2.
            pad = (kernel_size - 1) // 2 if is_pad else 0
            activation = block['activation']
            model = nn.Sequential()
            if batch_normalize:
                # Conv without bias: the BatchNorm affine term replaces it.
                model.add_module(
                    'conv{0}'.format(conv_id),
                    nn.Conv2d(prev_filters,
                              filters,
                              kernel_size,
                              stride,
                              pad,
                              bias=False))
                model.add_module('bn{0}'.format(conv_id),
                                 nn.BatchNorm2d(filters))
                #model.add_module('bn{0}'.format(conv_id), BN2d(filters))
            else:
                model.add_module(
                    'conv{0}'.format(conv_id),
                    nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
            if activation == 'leaky':
                model.add_module('leaky{0}'.format(conv_id),
                                 nn.LeakyReLU(0.1, inplace=True))
            elif activation == 'relu':
                model.add_module('relu{0}'.format(conv_id),
                                 nn.ReLU(inplace=True))
            prev_filters = filters
            out_filters.append(prev_filters)
            models.append(model)
        elif block['type'] == 'maxpool':
            pool_size = int(block['size'])
            stride = int(block['stride'])
            if stride > 1:
                model = nn.MaxPool2d(pool_size, stride)
            else:
                # stride-1 maxpool needs special padding to keep the size
                model = MaxPoolStride1()
            out_filters.append(prev_filters)
            models.append(model)
        elif block['type'] == 'avgpool':
            model = GlobalAvgPool2d()
            out_filters.append(prev_filters)
            models.append(model)
        elif block['type'] == 'softmax':
            model = nn.Softmax()
            out_filters.append(prev_filters)
            models.append(model)
        elif block['type'] == 'cost':
            # NOTE(review): size_average is deprecated in modern PyTorch
            # (use reduction='mean'); kept as-is for the targeted version.
            if block['_type'] == 'sse':
                model = nn.MSELoss(size_average=True)
            elif block['_type'] == 'L1':
                model = nn.L1Loss(size_average=True)
            elif block['_type'] == 'smooth':
                model = nn.SmoothL1Loss(size_average=True)
            out_filters.append(1)
            models.append(model)
        elif block['type'] == 'reorg':
            # Space-to-depth: trades spatial resolution for channels.
            stride = int(block['stride'])
            prev_filters = stride * stride * prev_filters
            out_filters.append(prev_filters)
            models.append(Reorg(stride))
        elif block['type'] == 'route':
            layers = block['layers'].split(',')
            ind = len(models)
            # Negative layer indices are relative to the current position.
            layers = [int(i) if int(i) > 0 else int(i) + ind for i in layers]
            if len(layers) == 1:
                prev_filters = out_filters[layers[0]]
            elif len(layers) == 2:
                # Two-input route concatenates channels; first input must be
                # the immediately preceding layer.
                assert (layers[0] == ind - 1)
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
            out_filters.append(prev_filters)
            models.append(EmptyModule())
        elif block['type'] == 'shortcut':
            # Residual add: channel count is unchanged.
            ind = len(models)
            prev_filters = out_filters[ind - 1]
            out_filters.append(prev_filters)
            models.append(EmptyModule())
        elif block['type'] == 'connected':
            filters = int(block['output'])
            if block['activation'] == 'linear':
                model = nn.Linear(prev_filters, filters)
            elif block['activation'] == 'leaky':
                model = nn.Sequential(nn.Linear(prev_filters, filters),
                                      nn.LeakyReLU(0.1, inplace=True))
            elif block['activation'] == 'relu':
                model = nn.Sequential(nn.Linear(prev_filters, filters),
                                      nn.ReLU(inplace=True))
            prev_filters = filters
            out_filters.append(prev_filters)
            models.append(model)
        elif block['type'] == 'region':
            # YOLOv2 detection head: configure the RegionLoss module.
            loss = RegionLoss()
            anchors = block['anchors'].split(',')
            loss.anchors = [float(i) for i in anchors]
            loss.num_classes = int(block['classes'])
            loss.num_anchors = int(block['num'])
            # NOTE(review): true division -- anchor_step is a float under
            # Python 3; RegionLoss presumably expects an int (this code
            # looks ported from Python 2). Verify before upgrading.
            loss.anchor_step = len(loss.anchors) / loss.num_anchors
            loss.object_scale = float(block['object_scale'])
            loss.noobject_scale = float(block['noobject_scale'])
            loss.class_scale = float(block['class_scale'])
            loss.coord_scale = float(block['coord_scale'])
            out_filters.append(prev_filters)
            models.append(loss)
        else:
            print('unknown type %s' % (block['type']))

    return models