Example #1
0
    def __init__(self, output_file="stats.txt"):
        self.model = self.build_model()
        self.current_weights = self.model.get_weights()
        # print(len(self.model.layers))
        # for convergence check
        self.prev_train_loss = None

        self.best_loss = None
        self.best_weight = None
        self.best_round = -1

        # all rounds; losses[i] = [round#, timestamp, loss]
        # round# could be None if not applicable
        self.train_losses = []
        self.valid_losses = []
        self.train_accuracies = []
        self.valid_accuracies = []
        self.pre_train_losses = []
        self.pre_train_accuracies = []

        self.training_start_time = int(round(time.time()))

        self.baselines = Baseline()

        self.output_file = output_file
Example #2
0
def noise_level_eval():
    args = grab_config()
    args.gpus = [0]  # Force evaluation on a single GPU.

    seed_everything(42)

    logger = TensorBoardLogger(
        save_dir=args.logdir,
        version=args.experiment_name,
        name='NoisyCLIP_Logs'
    )
    trainer = Trainer.from_argparse_args(args, logger=logger, progress_bar_refresh_rate=0)

    if not os.path.exists(args.results_dir):
        os.mkdir(args.results_dir)

    if not os.path.exists(os.path.join(args.results_dir, args.experiment_name)):
        os.mkdir(os.path.join(args.results_dir, args.experiment_name))

    if not isinstance(args.noise_levels, list):
        args.noise_levels = [args.noise_levels]

    for noise_level in args.noise_levels:
        all_results = []
        for test in range(args.num_tests):
            #Choose the appropriate model based on type, and load from checkpoint.
            if args.saved_model_type == 'linear':
                saved_model = LinearProbe.load_from_checkpoint(args.checkpoint_path)
            elif args.saved_model_type == 'baseline':
                saved_model = Baseline.load_from_checkpoint(args.checkpoint_path)
            
            # Correctly define noise levels to test.
            if args.distortion == "squaremask":
                args.length = noise_level
            elif args.distortion == "randommask":
                args.percent_missing = noise_level
            elif args.distortion == "gaussiannoise":
                args.std = noise_level
            elif args.distortion == "gaussianblur":
                args.kernel_size = noise_level[0]
                args.sigma = noise_level[1]

            test_data = ImageNet100Test(args)
            results = trainer.test(model=saved_model, datamodule=test_data, verbose=False)
            all_results.extend(results)

            print("Done with " + str(noise_level))

        top1_accs = [x['test_top_1'] for x in all_results]
        top5_accs = [x['test_top_5'] for x in all_results]
        with open(os.path.join(args.results_dir, args.experiment_name, 'noise_level_{0:}.out'.format(int(100*noise_level))), 'w+') as f:
            f.write('Top 1 mean\t{0:.4f}\n'.format(np.mean(top1_accs)))
            f.write('Top 1 std\t{0:.4f}\n'.format(np.std(top1_accs, ddof=1)))
            f.write('Top 1 stderr\t{0:.4f}\n'.format(np.std(top1_accs, ddof=1)/np.sqrt(args.num_tests)))
            f.write('Top 5 mean\t{0:.4f}\n'.format(np.mean(top5_accs)))
            f.write('Top 5 std\t{0:.4f}\n'.format(np.std(top5_accs, ddof=1)))
            f.write('Top 5 stderr\t{0:.4f}\n'.format(np.std(top5_accs, ddof=1)/np.sqrt(args.num_tests)))
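For reference, the statistics written out above are the sample mean, the sample standard deviation (ddof=1), and the standard error of the mean over the num_tests runs. A minimal self-contained sketch with made-up accuracy values:

import numpy as np

top1_accs = [0.71, 0.69, 0.73, 0.70, 0.72]  # hypothetical per-run accuracies
num_tests = len(top1_accs)

mean = np.mean(top1_accs)
std = np.std(top1_accs, ddof=1)       # sample standard deviation, as in the writes above
stderr = std / np.sqrt(num_tests)     # standard error of the mean
print('{0:.4f}\t{1:.4f}\t{2:.4f}'.format(mean, std, stderr))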
Example #3
0
    def __init__(self, args):
        super(TransferLearning, self).__init__()

        self.hparams = args
        self.world_size = self.hparams.num_nodes * self.hparams.gpus

        self.train_set_transform, self.val_set_transform = grab_transforms(
            self.hparams)

        #Grab the correct model - only want the embeddings from the final layer!
        if self.hparams.saved_model_type == 'contrastive':
            saved_model = NoisyCLIP.load_from_checkpoint(
                self.hparams.checkpoint_path)
            self.backbone = saved_model.noisy_visual_encoder
        elif self.hparams.saved_model_type == 'baseline':
            saved_model = Baseline.load_from_checkpoint(
                self.hparams.checkpoint_path)
            self.backbone = saved_model.encoder.feature_extractor

        for param in self.backbone.parameters():
            param.requires_grad = False

        #Set up a classifier with the correct dimensions
        self.output = nn.Linear(self.hparams.emb_dim, self.hparams.num_classes)

        #Set up the criterion; mean reduction averages the loss over each batch
        self.criterion = nn.CrossEntropyLoss(reduction="mean")

        self.train_top_1 = Accuracy(top_k=1)
        self.train_top_5 = Accuracy(top_k=5)

        self.val_top_1 = Accuracy(top_k=1)
        self.val_top_5 = Accuracy(top_k=5)

        self.test_top_1 = Accuracy(top_k=1)
        self.test_top_5 = Accuracy(top_k=5)

        #class INFECTED has label 0
        if self.hparams.dataset == 'COVID':
            self.val_auc = AUROC(pos_label=0)

            self.test_auc = AUROC(pos_label=0)
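The constructor above freezes the backbone and trains only the linear head. As a minimal standalone sketch of that linear-probe pattern, using a toy nn.Sequential backbone as a stand-in for the saved encoder (an assumption for illustration only):

import torch
import torch.nn as nn

backbone = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 128))  # toy stand-in encoder
for param in backbone.parameters():
    param.requires_grad = False       # frozen, exactly as in __init__ above

head = nn.Linear(128, 10)             # plays the role of self.output

x = torch.randn(4, 3, 32, 32)
labels = torch.randint(0, 10, (4,))
loss = nn.CrossEntropyLoss(reduction="mean")(head(backbone(x)), labels)
loss.backward()

# Only the head receives gradients; the frozen backbone stays fixed.
print(head.weight.grad is not None, backbone[1].weight.grad is None)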
Example #4
0
class GlobalModel(object):
    """docstring for GlobalModel"""
    def __init__(self, output_file="stats.txt"):
        self.model = self.build_model()
        self.current_weights = self.model.get_weights()
        # print(len(self.model.layers))
        # for convergence check
        self.prev_train_loss = None

        self.best_loss = None
        self.best_weight = None
        self.best_round = -1

        # all rounds; losses[i] = [round#, timestamp, loss]
        # round# could be None if not applicable
        self.train_losses = []
        self.valid_losses = []
        self.train_accuracies = []
        self.valid_accuracies = []
        self.pre_train_losses = []
        self.pre_train_accuracies = []

        self.training_start_time = int(round(time.time()))

        self.baselines = Baseline()

        self.output_file = output_file

    def build_model(self):
        raise NotImplementedError()

    # client_weights: per-client weight lists; client_sizes: number of samples per client
    def update_weights(self, client_weights, client_sizes):
        new_weights = [np.zeros(w.shape) for w in self.current_weights]
        total_size = np.sum(client_sizes)

        for c in range(len(client_weights)):
            for i in range(len(new_weights)):
                new_weights[i] += client_weights[c][i] * client_sizes[c] / total_size
        self.current_weights = new_weights

    def update_weights_baseline(self, client_weights, client_sizes,
                                agg_method):
        # ["normal_atten", "atten", "TrimmedMean", "Krum", "GeoMed"]
        global selected_weights
        if agg_method == "TrimmedMean":
            selected_weights = self.baselines.cal_TrimmedMean(client_weights)
        elif agg_method == "Krum":
            selected_weights = self.baselines.cal_Krum(client_weights)
        elif agg_method == "GeoMed":
            selected_weights = self.baselines.cal_GeoMed(client_weights)
        elif agg_method == "theroy":
            selected_weights = self.baselines.cal_theroy(client_weights, 5)
        elif agg_method == "NoDetect":
            selected_weights = self.baselines.cal_NoDetect(client_weights)
        else:
            print("####### Invalid Benchmark Option #######")

        # with open('server_trimmedMean.log', 'a') as fw:
        #     for item in client_weights:
        #         fw.write( "{} [INFO] Client weights: {}".format(datetime.datetime.now(), np.array2string(np.array(item[-1]))) )
        #     fw.write( '{} [Selected]: {}\n'.format(datetime.datetime.now(), selected_weights[-1]) )
        #     fw.write( '------------------------\n')

        self.current_weights = selected_weights

    def update_weights_with_attention(self, client_weights, client_sizes,
                                      attention, attack_label):
        new_weights = [np.zeros(w.shape) for w in self.current_weights]
        total_size = np.sum(client_sizes)
        attention = np.asarray(attention)
        # print("new attention", attention)
        client_sizes = np.asarray(client_sizes) / total_size
        print("client_sizes", client_sizes)
        scores = np.multiply(attention, client_sizes)
        # print("scores", scores)
        scores_norm = scores / np.sum(scores)
        print("scores_norm", scores_norm)
        # exit()

        for c in range(len(client_weights)):
            for i in range(len(new_weights)):
                new_weights[i] += (client_weights[c][i]) * scores_norm[c]

        with open('server_attention_sign_flipping.log', 'a') as fw:
            for item in client_weights:
                fw.write("{} [INFO] Client weights: {}".format(
                    datetime.datetime.now(),
                    np.array2string(np.array(item[-1]))))
            fw.write('{} [Weights]: {}\n'.format(datetime.datetime.now(),
                                                 attention))
            fw.write('{} [Attack_Label]: {}\n'.format(datetime.datetime.now(),
                                                      " ".join(attack_label)))
            fw.write('\n------------------------\n')
        self.current_weights = new_weights

    def aggregate_loss_accuracy(self, client_losses, client_accuracies,
                                client_sizes):
        total_size = np.sum(client_sizes)
        aggr_loss = np.sum([client_losses[i] * client_sizes[i] / total_size
                            for i in range(len(client_sizes))])
        aggr_accuracies = np.sum([client_accuracies[i] * client_sizes[i] / total_size
                                  for i in range(len(client_sizes))])
        return aggr_loss, aggr_accuracies

    def aggregate_train_loss_accuracy(self, client_losses, client_accuracies,
                                      client_sizes, cur_round):
        cur_time = int(round(time.time())) - self.training_start_time
        aggr_loss, aggr_accuracies = self.aggregate_loss_accuracy(
            client_losses, client_accuracies, client_sizes)
        self.train_losses += [[cur_round, cur_time, aggr_loss]]
        self.train_accuracies += [[cur_round, cur_time, aggr_accuracies]]
        with open(self.output_file, 'w') as outfile:
            json.dump(self.get_stats(), outfile)
        return aggr_loss, aggr_accuracies

    # cur_round could be None
    def aggregate_pre_train_loss_accuracy(self, client_losses,
                                          client_accuracies, client_sizes,
                                          cur_round):
        cur_time = int(round(time.time())) - self.training_start_time
        aggr_loss, aggr_accuracies = self.aggregate_loss_accuracy(
            client_losses, client_accuracies, client_sizes)
        self.pre_train_losses += [[cur_round, cur_time, aggr_loss]]
        self.pre_train_accuracies += [[cur_round, cur_time, aggr_accuracies]]
        with open(self.output_file, 'w') as outfile:
            json.dump(self.get_stats(), outfile)
        return aggr_loss, aggr_accuracies

    def aggregate_valid_loss_accuracy(self, client_losses, client_accuracies,
                                      client_sizes, cur_round):
        cur_time = int(round(time.time())) - self.training_start_time
        aggr_loss, aggr_accuracies = self.aggregate_loss_accuracy(
            client_losses, client_accuracies, client_sizes)
        self.valid_losses += [[cur_round, cur_time, aggr_loss]]
        self.valid_accuracies += [[cur_round, cur_time, aggr_accuracies]]
        with open(self.output_file, 'w') as outfile:
            json.dump(self.get_stats(), outfile)
        return aggr_loss, aggr_accuracies

    def get_stats(self):
        return {
            "train_loss": self.train_losses,
            "valid_loss": self.valid_losses,
            "train_accuracy": self.train_accuracies,
            "valid_accuracy": self.valid_accuracies,
            "pre_train_loss": self.pre_train_losses,
            "pre_train_accuracy": self.pre_train_accuracies,
        }
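To make the aggregation concrete, here is a standalone sketch of the size-weighted averaging that update_weights performs (federated averaging), using tiny made-up client weights in place of real model layers:

import numpy as np

client_weights = [
    [np.array([1.0, 1.0]), np.array([[2.0]])],   # client 0: two "layers"
    [np.array([3.0, 3.0]), np.array([[4.0]])],   # client 1
]
client_sizes = [10, 30]
total_size = np.sum(client_sizes)

new_weights = [np.zeros_like(w) for w in client_weights[0]]
for c in range(len(client_weights)):
    for i in range(len(new_weights)):
        new_weights[i] += client_weights[c][i] * client_sizes[c] / total_size

print(new_weights)  # layer 0 -> [2.5, 2.5], layer 1 -> [[3.5]]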
Example #5
0
    """
    def __init__(self):
        self.transform = transforms.Compose([
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor()
        ])

    def __call__(self, x):
        return self.transform(x)


args = grab_config()

#Load the saved model
saved_model = Baseline.load_from_checkpoint(args.checkpoint_path)
saved_model = saved_model.encoder
saved_model = saved_model.to('cuda')

dataset = ImageNet100(root='/tmp/ImageNet100',
                      split='val',
                      transform=ImageNetBaseTransformVal())

loader = DataLoader(dataset,
                    batch_size=1,
                    num_workers=12,
                    pin_memory=True,
                    shuffle=True)

#Set up the masking distortion and save the mask to be used in Deep Decoder
distortion = RandomMask(percent_missing=0.5, return_mask=True)
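The project's RandomMask implementation is not shown here, so as an illustrative stand-in only (its real signature and return values are assumptions), a masking distortion with percent_missing=0.5 and return_mask=True behaviour could look like this:

import torch

def random_mask_sketch(img, percent_missing=0.5):
    # img: (C, H, W); the same binary mask is applied to every channel.
    mask = (torch.rand(img.shape[1:]) >= percent_missing).float()
    return img * mask, mask

masked, mask = random_mask_sketch(torch.rand(3, 224, 224))
print(masked.shape, mask.mean().item())  # roughly half the pixel locations survive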
def noise_level_eval():
    args = grab_config()
    args.gpus = [0]  # Force evaluation on a single GPU.

    seed_everything(42)

    logger = TensorBoardLogger(save_dir=args.logdir,
                               version=args.experiment_name,
                               name='NoisyCLIP_Logs')
    trainer = Trainer.from_argparse_args(args, logger=logger)

    if not os.path.exists(os.path.join(args.results_dir,
                                       args.experiment_name)):
        os.mkdir(os.path.join(args.results_dir, args.experiment_name))

    for distortion in DISTORTIONS:
        print(distortion)
        for sub_distortion in SUB_DISTORTIONS[distortion]:
            print(sub_distortion)
            top_1_list = []
            top_5_list = []

            if not os.path.exists(
                    os.path.join(args.results_dir, args.experiment_name,
                                 distortion)):
                os.makedirs(
                    os.path.join(args.results_dir, args.experiment_name,
                                 distortion))

            for level in LEVELS:
                print(level)

                #Choose the appropriate model based on type, and load from checkpoint.
                if args.saved_model_type == 'linear':
                    saved_model = LinearProbe.load_from_checkpoint(
                        args.checkpoint_path)
                elif args.saved_model_type == 'baseline':
                    saved_model = Baseline.load_from_checkpoint(
                        args.checkpoint_path)

                #Load the appropriate data and run the test once with the saved model
                test_data = ImageNet100CTest(args,
                                             distortion=distortion,
                                             sub_distortion=sub_distortion,
                                             level=level)
                results = trainer.test(model=saved_model,
                                       datamodule=test_data,
                                       verbose=False)

                top1_accs = results[0]['test_top_1']
                top5_accs = results[0]['test_top_5']

                print(top1_accs)

                top_1_list.append(top1_accs)
                top_5_list.append(top5_accs)

                with open(
                        os.path.join(args.results_dir, args.experiment_name,
                                     distortion, sub_distortion + '.out'),
                        'a+') as f:
                    f.write('{0}:\t{1:.4f}\t{2:.4f}\n'.format(
                        level, top1_accs, top5_accs))

            with open(
                    os.path.join(args.results_dir, args.experiment_name,
                                 distortion, sub_distortion + '.out'),
                    'a+') as f:
                f.write('MEAN:\t{0:.4f}\t{1:.4f}\n'.format(
                    np.mean(top_1_list), np.mean(top_5_list)))
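A hypothetical entry point (not part of the snippet) would simply call the evaluation function defined above:

if __name__ == '__main__':
    noise_level_eval()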