Example #1
def get_gradient(model,
                 x,
                 label,
                 criterion,
                 similarity_coeffs=None,
                 mask=None):
    # autograd.Variable is deprecated; make x a CUDA leaf tensor that tracks gradients
    x = x.detach().cuda().requires_grad_(True)

    if isinstance(model, list):
        if similarity_coeffs is None:
            similarity_coeffs = {index: 1 / len(model)
                                 for index in range(len(model))}

        loss = torch.zeros(1).cuda()
        for arch, current_model in zip(similarity_coeffs.keys(), model):
            current_model.cuda()
            prediction = predict(current_model, x)
            current_loss = criterion(prediction, label)
            loss = torch.add(loss, similarity_coeffs[arch] * current_loss)
    else:
        prediction = predict(model, x)
        loss = criterion(prediction, label)

    if mask is not None:
        x.register_hook(lambda grad: grad * mask.float())

    gradient = autograd.grad(loss, x)[0]
    return gradient.cpu()
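For context, get_gradient relies on a predict helper that is not shown in this excerpt; below is a minimal sketch of that helper plus a hypothetical call, where model, image, label, and mask are placeholders rather than names from the source:

import torch

# Assumed helper: predict is a plain forward pass.
def predict(model, x):
    return model(x)

# Hypothetical usage: input gradient of the cross-entropy loss,
# masked so only foreground pixels receive gradient.
criterion = torch.nn.CrossEntropyLoss()
gradient = get_gradient(model, image, label, criterion, mask=mask)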
Example #2
    def selective_transfer(self, image_batch, mask_batch, targets, step):
        model_scores = dict.fromkeys(self.available_surrogates_list, 0)
        mse_criterion = torch.nn.MSELoss(reduction='mean')
        batch_indices = torch.arange(image_batch.size(0))

        step.eps = self.args_dict['sigma']

        x = image_batch.clone().detach().requires_grad_(False)
        if self.args_dict['targeted']:
            original_labels = torch.argmax(predict(self.model, x), dim=1)
        else:
            original_labels = targets

        x = torch.cat([x.unsqueeze(0)] * self.args_dict['num_transformations'])
        x = step.random_perturb(x, mask_batch)

        predictions = []
        labels = []
        for current_x in x:
            predictions.append(predict(self.model, current_x))
            current_labels = torch.argmax(predictions[-1], dim=1)
            labels.append(current_labels)

            self.args_dict['label_shifts'] += torch.sum(~torch.eq(current_labels, original_labels)).item()

        for arch in self.available_surrogates_list:
            current_model = get_model(arch, 'standard', freeze=True, device=self.args_dict['device']).eval()

            for index, current_x in enumerate(x):
                current_predictions = predict(current_model, current_x)
                current_loss = mse_criterion(current_predictions[batch_indices, labels[index]],
                                             predictions[index][batch_indices, labels[index]])
                model_scores[arch] += current_loss.item()

            to_device(current_model, 'cpu')

        surrogates_list = sorted(
            model_scores, key=model_scores.get)[:self.args_dict['num_surrogates']]

        if self.args_dict['similarity_coeffs']:
            scores_reversed = torch.FloatTensor([model_scores[arch] for arch in surrogates_list][::-1])
            coeffs = (scores_reversed / torch.sum(scores_reversed)).tolist()
        else:
            coeffs = [1 / len(surrogates_list)] * len(surrogates_list)

        self.similarity_coeffs = dict(zip(surrogates_list, coeffs))
        ALL_SIMILARITY_COEFFS.append(self.similarity_coeffs)

        surrogate_models = [get_model(arch, pretrained=True, freeze=True).eval()
                            for arch in surrogates_list]
        return surrogate_models
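For intuition, the similarity coefficients reverse the ascending score list before normalizing, so the lowest-MSE (most similar) surrogate is paired with the largest weight. A worked sketch with invented scores:

import torch

scores = [0.2, 0.5, 1.3]  # invented MSE scores, sorted ascending (best surrogate first)
scores_reversed = torch.FloatTensor(scores[::-1])  # tensor([1.3000, 0.5000, 0.2000])
coeffs = (scores_reversed / torch.sum(scores_reversed)).tolist()
# coeffs == [0.65, 0.25, 0.1]: zip pairs the best surrogate with the largest coefficient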
Example #3
def get_averages_dict(model, criterion, args_dict):
    averages_dict = {}

    for category_file in os.listdir(args_dict['dataset']):
        category_grads = []
        if category_file.endswith('.pt'):
            dataset = torch.load(
                os.path.join(args_dict['dataset'], category_file))

            if len(dataset) == 0:
                continue
            for image, mask in dataset:
                prediction = predict(model, image.cuda())
                label = torch.argmax(prediction, dim=1).cuda()

                grad = get_gradient(model, image, label, criterion)
                foreground_grad = grad * mask
                background_grad = grad - foreground_grad

                if args_dict['normalize_grads']:
                    foreground_grad, background_grad = normalize_grad(
                        foreground_grad), normalize_grad(background_grad)
                category_grads.append(
                    [foreground_grad.cpu(),
                     background_grad.cpu()])

            foreground_average, background_average = get_category_average(
                category_grads, len(dataset))
            averages_dict[dataset.category] = [
                foreground_average, background_average
            ]

    return averages_dict
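get_category_average is not defined in this excerpt; here is a minimal sketch consistent with the call above, assuming it simply averages the accumulated foreground and background gradients for one category:

def get_category_average(category_grads, num_images):
    # Assumed behavior: category_grads is a list of
    # [foreground_grad, background_grad] CPU tensor pairs.
    foreground_average = sum(pair[0] for pair in category_grads) / num_images
    background_average = sum(pair[1] for pair in category_grads) / num_images
    return foreground_average, background_average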
Example #4
def upload_file():
    # check if the post request has the file part
    if "file" not in request.files:
        resp = jsonify({"message": "No file part in the request"})
        resp.status_code = 400
        return resp
    file = request.files["file"]
    if file.filename == "":
        resp = jsonify({"message": "No file selected for uploading"})
        resp.status_code = 400
        return resp
    if file and allowed_file(file):
        file.seek(0) # in case file was read through earlier
        encoded_image = file.read()
        image = load_image(encoded_image, target_shape)

        # The uploaded image has been read and decoded into the model's input format
        pred = predict(model, image, labels)  # Predict on the model
        pred = {x: float(pred[x]) for x in pred}  # Convert NumPy floats to Python floats so the dict is JSON-serializable
        resp = jsonify(pred)  # Build response as json
        resp.status_code = 200
        return resp
    else:
        resp = jsonify(
            {"message": "Allowed file types are png, jpg, jpeg"})
        resp.status_code = 400
        return resp
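The route depends on an allowed_file helper that is not shown; below is a minimal sketch consistent with the error message above, assuming a plain extension whitelist:

ALLOWED_EXTENSIONS = {"png", "jpg", "jpeg"}

def allowed_file(file):
    # Assumed behavior: accept only filenames with a whitelisted image extension.
    return ("." in file.filename
            and file.filename.rsplit(".", 1)[1].lower() in ALLOWED_EXTENSIONS)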
Example #5
def upload_file():
    global model, labels_dict
    if request.method == 'POST':
        f = request.files['file']
        filename = secure_filename(f.filename)
        saved_path = os.path.join(app.config['UPLOAD_FOLDER'], filename)
        f.save(saved_path)
        print(saved_path)

        # Raw input (read back from disk; save() consumes the upload stream)
        image = Image.open(saved_path)
        image = np.array(image)
        predict_label, prob = model_utils.predict(model, labels_dict, image)

        # Get CAM heat map
        model_utils.visualize_cam(model,
                                  model_utils.resize_image(image),
                                  last_conv_layer_index=-5,
                                  learning_phase=0,
                                  show=False,
                                  path_to_save=os.path.join(
                                      app.config['UPLOAD_FOLDER'],
                                      'cam_' + filename))

        print('Predict label: ', predict_label)

        # Render inside the POST branch: filename and predict_label are only
        # defined here, so returning outside it would raise NameError on GET
        return render_template(
            "prediction_page.html",
            image_filename='http://localhost:3333/upload/' + filename,
            cam_filename='http://localhost:3333/upload/' + 'cam_' + filename,
            predict_label=predict_label[0],
            prob=prob[0])
Example #6
def predict_language(review_text):
    review_text = remove_emojis(review_text)
    review_text = preprocess(review_text)
    prediction = predict(review_text)
    label = prediction[0][0]
    score = prediction[1][0]
    label = label.replace('__label__', '')
    return label, score
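The predict call here evidently wraps a fastText-style classifier, which returns parallel tuples of labels (prefixed with __label__) and scores; the function unpacks the top entry of each and strips the prefix. An illustrative call with invented values:

# Illustrative only: shape of the tuple this function unpacks.
# predict("this place was great") -> (('__label__en',), (0.97,))
label, score = predict_language("This place was great!")
# label == 'en', score == 0.97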
Example #7
    def fit(self, images, labels):
        for epoch in range(self.training_args_dict['epochs']):
            current_loss = 0.0
            images_loader, labels_loader = create_data_loaders(images,
                                                               labels,
                                                               shuffle=True)

            for image_batch, label_batch in zip(images_loader, labels_loader):
                image_batch = image_batch.cuda()
                label_batch = label_batch.cuda()

                if self.adversarial:
                    image_batch = self.create_adversarial_examples(
                        image_batch, label_batch)

                self.model = self.model.cuda().train()
                predictions = predict(self.model,
                                      self.normalize(image_batch))

                self.optimizer.zero_grad()
                loss = self.criterion(predictions, label_batch)
                loss.backward()

                if self.training_args_dict['weight_averaging']:
                    # Snapshot pre-step weights; iterating parameters() after
                    # the step would average each tensor with itself (a no-op)
                    old_parameters = [parameter.detach().clone()
                                      for parameter in self.model.parameters()]

                self.optimizer.step()

                if self.training_args_dict['weight_averaging']:
                    with torch.no_grad():
                        for (name, parameter), old_parameter in zip(
                                self.model.named_parameters(), old_parameters):
                            if 'weight' in name:
                                parameter.copy_(
                                    (parameter + old_parameter) / 2)

                        predictions = predict(self.model,
                                              self.normalize(image_batch))
                        loss = self.criterion(predictions, label_batch)

                current_loss += loss.item() * image_batch.size(0)

            epoch_loss = current_loss / len(images)
            print('Epoch: {}/{} - Loss: {}'.format(
                epoch + 1, self.training_args_dict['epochs'], epoch_loss))

            self.losses.append(epoch_loss)  # track the loss, not the epoch index
Example #8
def predict(num_timesteps_input, num_timesteps_output):
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    loss_criterion = nn.MSELoss()
    interactive_app_path = dirname(dirname(
        abspath(__file__)))  # Use this. Having issues with Heroku path system
    raw_dir = os.path.join(interactive_app_path, 'data', 'raw')
    process_dir = os.path.join(interactive_app_path, 'data', 'processed')
    preprocessing_utils.processed(raw_dir, process_dir, overwrite=True)
    A, X, metadata, cat2index, timesteps, means, stds = preprocessing_utils.load(
        process_dir)
    test_original_data = X
    test_input, test_target = preprocessing_utils.generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    adj_mat = preprocessing_utils.get_normalized_adj(A)
    adj_mat = torch.from_numpy(adj_mat).to(device).float()

    i = 0
    indices = [(i, i + (num_timesteps_input + num_timesteps_output))]
    features, target = [], []
    for i, j in indices:
        features.append(X[:, :, i:i + num_timesteps_input].transpose(
            (0, 2, 1)))
        target.append(X[:, 0, i + num_timesteps_input:j])

    test_input = torch.from_numpy(np.array(features)).to(device)
    test_target = torch.from_numpy(np.array(target)).to(device)

    # Load model
    traffic_prediction_path = dirname(interactive_app_path)
    # saved_models_path = os.path.join(traffic_prediction_path, 'saved_models', 'last_saved_model.txt')
    # with open(saved_models_path) as f:
    #     saved_model = f.read()

    latest_model_path = os.path.join(traffic_prediction_path, 'saved_models',
                                     'Final_STGCN_Weights')
    checkpoint = torch.load(latest_model_path,
                            map_location=torch.device('cpu'))
    model_stgcn = model.Stgcn_Model(checkpoint['model_nodes_num'],
                                    checkpoint['model_features_num'],
                                    checkpoint['model_input_timesteps'],
                                    checkpoint['model_num_output'])
    model_stgcn.load_state_dict(checkpoint['state_dict'])
    optimizer = optim.Adam(model_stgcn.parameters(), lr=checkpoint['model_lr'])
    optimizer.load_state_dict(checkpoint['opti_state_dict'])  # mutates in place; returns None
    loaded_model = model_stgcn
    loaded_model.to(device)
    loaded_optimizer = optimizer

    predicted = model_utils.predict(loaded_model, test_input, adj_mat)
    predicted = predicted.cpu().numpy()
    predicted_denorm = preprocessing_utils.denormalize(predicted, stds[0],
                                                       means[0])

    return np.array(predicted_denorm), A, X, metadata
Example #9
    def transfer_loss(self, x, labels):
        loss = torch.zeros([1], device=self.args_dict['device'])

        for arch, current_model in zip(self.similarity_coeffs.keys(), self.surrogate_models):
            predictions = predict(to_device(current_model, self.args_dict['device']), x)

            to_device(current_model, 'cpu')

            current_loss = self.criterion(predictions, labels)
            loss = torch.add(loss, self.optimization_direction * self.similarity_coeffs[arch] * current_loss)

        return loss
Example #10
def prediction(model, vocab_to_int, int_to_vocab):
    """
    Run prediction using user presets
    """
    st.sidebar.header("Prediction")
    top_k = st.sidebar.number_input("Top K words", value=3)

    DEFAULT_STARTING_LYRICS = "Enter Lyrics..."
    starting_lyrics = st.sidebar.text_input("Starting Lyrics",
                                            value=DEFAULT_STARTING_LYRICS)

    whole_song = False
    if starting_lyrics == DEFAULT_STARTING_LYRICS:
        st.sidebar.write("or...")
        whole_song = st.sidebar.checkbox(label="Generate a whole song")
    else:
        ending_lyrics = None
        length_of_output = st.sidebar.number_input("# Of lyrics to generate",
                                                   100)

    if whole_song:
        starting_lyrics = "<songstart>"
        ending_lyrics = "<songend>"
        length_of_output = int(1e5)
    else:
        ending_lyrics = None

    if starting_lyrics != DEFAULT_STARTING_LYRICS:
        prediction = predict(
            model=model,
            device=DEVICE,
            starting_string=starting_lyrics,
            vocab_to_int=vocab_to_int,
            int_to_vocab=int_to_vocab,
            output_len=length_of_output,
            stopping_word=ending_lyrics,
            top_k=top_k,
        )

        st.header("And here is your new masterpiece!")
        st.text(prediction)
Example #11
def main():
    # Get the input arguments
    input_arguments = process_arguments()
    
    # Set the device to cuda if specified
    default_device = torch.device("cuda" if torch.cuda.is_available() and input_arguments.gpu else "cpu")
    
    # Predict
    probs, classes = mu.predict(input_arguments.input_image_path, 
                                input_arguments.checkpoint_file_path,
                                default_device,
                                input_arguments.topk)
    
    
    for prob, specie in zip(probs, classes):
        print("your dataset named : " + specie +
              " predicted with probability: " + str(prob))
Example #12
def dopredict():
    """
    给定一个文件或一句话,预测结果
    :return:
    """
    parser = argparse.ArgumentParser(description='Text CNN 分类器')
    #必须指定已经训练好的模型
    parser.add_argument('--path',
                        type=str,
                        default="data/predict/",
                        help='要进行预测的文本文件的路径,或文件夹')
    parser.add_argument('--model',
                        type=str,
                        default="model/textcnn.model",
                        help='读取model进行预测')
    conf = Config()
    args = parser.parse_args()
    #指定Field格式
    text_field = data_utils.TextTEXT
    label_field = data_utils.TextLABEL
    text_field.vocab = data_utils.load_vocab("model/text.vocab")
    label_field.vocab = data_utils.load_vocab("model/label.vocab")
    # 模型加载和初始化
    if os.path.exists(args.model):
        print('发现模型文件, 加载模型: {}'.format(args.model))
        cnn = torch.load(args.model)
    else:
        print("未找到模型文件,退出")
        sys.exit(-1)
    #如果是文件夹,那么预测里面的文件,否则就是文件,直接预测
    if os.path.isdir(args.path):
        files = os.listdir(args.path)
        files_path = [args.path + f for f in files]
    else:
        files_path = [args.path]
    #开始预测
    for file in files_path:
        text, label = model_utils.predict(file, cnn, text_field, label_field,
                                          conf.cuda)
        print('[path]  {}\n[Text]  {}\n[Label] {}\n'.format(file, text, label))
    print(f'共预测{len(files_path)}个文件')
def main():
    start_time = time()
    
    # Handle Arguments
    in_arg = get_input_args_predict()
    print(in_arg)
    
    # Load checkpoint and rebuild network
    model = model_utils.load_checkpoint(in_arg.input, in_arg.gpu)
    # Process image
    image = data_image_utils.process_image(in_arg.image_path)
    # Label mapping
    cat_to_name = data_image_utils.get_label_mapping(in_arg.category_names)
    # Predict
    probs, classes = model_utils.predict(image.unsqueeze(0), model, in_arg.top_k)  # Variable wrapper is deprecated
    model_utils.print_prediction(classes, probs, model.class_to_idx, cat_to_name)
    
    tot_time = time() - start_time
    hours, remainder = divmod(int(tot_time), 3600)
    minutes, seconds = divmod(remainder, 60)
    print("\n** Total Elapsed Runtime: {}:{}:{}".format(hours, minutes, seconds))
Example #14
def main():
    # Get the input arguments
    input_arguments = process_arguments()

    # Set the device to cuda if specified
    default_device = torch.device(
        "cuda" if torch.cuda.is_available() and input_arguments.gpu else "cpu")

    # Predict
    probs, classes = mu.predict(input_arguments.input_image_path,
                                input_arguments.checkpoint_file_path,
                                default_device, input_arguments.topk)

    # Extract species
    species = du.extract_mapping(input_arguments.cat_name_file, classes)

    for prob, specie in zip(probs, species):
        print("Flower named : " + specie +
              " predicted with probability: " + str(prob))
Example #15
    def run_predict_cv(self, df):
        ds = datasets.SpectrogramDataset(
            df,
            self.data_dir,
            sample_rate=self.config.sample_rate,
            composer=self.val_composer,
        )
        dataloader = torch.utils.data.DataLoader(
            ds, shuffle=False, **self.config.dataloader
        )
        preds = np.zeros((len(df), self.n_class))
        for i_fold, _ in enumerate(self.fold_indices):
            model_path = self.save_dir / f"best_model_fold{i_fold}.pth"
            model = model_utils.load_pytorch_model(
                model_name=self.config.model.name,
                path=model_path,
                n_class=self.n_class,
                in_chans=self.config.model.in_chans,
            )
            preds += model_utils.predict(
                model, dataloader, self.n_class, self.device, sigmoid=True
            )
        preds /= len(self.fold_indices)
        return preds
Example #16
                    help='Path to model checkpoint to predict with.')
parser.add_argument('--top_k',
                    nargs='?',
                    default=def_top_k,
                    type=int,
                    help='Number of best predictions to show.')
parser.add_argument(
    '--category_names',
    nargs='?',
    default=def_category_names,
    type=str,
    help=f'Filepath to mapping of labels to be used in place of numerical '
         f'categories. Default is {def_category_names}.')
parser.add_argument('--gpu',
                    action='store_true',
                    help='Pass this flag to use GPU if available.')

args = parser.parse_args()
print(args)

model = load_from_checkpoint(args.checkpoint_filepath)

probs, classes = predict(args.image_filepath, model, args.gpu, args.top_k)

label_map = get_category_labels(args.category_names)
labels = [label_map[cls + 1] for cls in classes]
print(probs)
print(classes)
print(labels)
Example #17
def main():
    time = get_current_time()

    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str, choices=ARCHS_LIST, default='resnet50')
    parser.add_argument('--dataset', type=str, default='dataset/imagenet')
    parser.add_argument('--masks', default=False, action='store_true')
    parser.add_argument('--num_samples', type=int, default=50)
    parser.add_argument('--gradient_priors', default=False, action='store_true')
    parser.add_argument('--grad_iterations', type=int, default=32)
    parser.add_argument('--attack_type', type=str, choices=['nes', 'simba'], default='simba')
    parser.add_argument('--conv', default=False, action='store_true')
    parser.add_argument('--substitute_model', type=str, choices=ARCHS_LIST, default='resnet152')
    parser.add_argument('--ensemble_selection', default=False, action='store_true')
    parser.add_argument('--transfer', default=False, action='store_true')
    parser.add_argument('--eps', type=float, default=1)
    parser.add_argument('--step_size', type=float, default=1/255.0)
    parser.add_argument('--num_iterations', type=int, default=1)
    parser.add_argument('--save_file_location', type=str, default='results/blackbox/' + time + '.pt')
    args_dict = vars(parser.parse_args())

    validate_save_file_location(args_dict['save_file_location'])

    model = get_model(args_dict['model'], pretrained=True, freeze=True).cuda().eval()

    if not args_dict['masks']:
        dataset = load_imagenet(args_dict['dataset'])
        loader, _ = dataset.make_loaders(workers=10, batch_size=1)
    else:
        loader = torch.load(args_dict['dataset'])

    adversarial_examples_list = []
    predictions_list = []
    substitute_model, criterion, pgd_attacker = None, None, None

    if args_dict['attack_type'] == 'nes':
        attack = nes
    else:
        attack = simba
        if args_dict['gradient_priors']:
            if args_dict['ensemble_selection']:
                pgd_attacker = Attacker(model.cuda(), PGD_DEFAULT_ARGS_DICT)
                pgd_attacker.args_dict['label_shifts'] = 0
                # Copy so the module-level ARCHS_LIST is not mutated by remove()
                pgd_attacker.available_surrogates_list = [
                    arch for arch in ARCHS_LIST if arch != args_dict['model']]
            else:
                substitute_model = get_model(args_dict['substitute_model'],
                                             pretrained=True, freeze=True).cuda().eval()

    for index, entry in enumerate(loader):
        if args_dict['masks']:
            image, mask = entry
            image.unsqueeze_(0)
            original_prediction = predict(model, image.cuda())
            label = torch.argmax(original_prediction, dim=1)
        else:
            image, label = entry
            mask = torch.ones_like(image)

            with torch.no_grad():
                original_prediction = predict(model, image.cuda())
                predicted_label = torch.argmax(original_prediction, dim=1)
                if label.item() != predicted_label.item():
                    continue

        criterion = torch.nn.CrossEntropyLoss(reduction='none')

        image.squeeze_(0)
        delta = attack(model, image.cuda(), label.cuda(), mask.cuda(),
                       args_dict, substitute_model, criterion, pgd_attacker)
        adversarial_example = (image.cuda() + delta).clamp(0, 1)

        with torch.no_grad():
            adversarial_prediction = predict(model, adversarial_example.unsqueeze(0))

        adversarial_examples_list.append(adversarial_example.cpu())
        predictions_list.append({'original': original_prediction.cpu(),
                                 'adversarial': adversarial_prediction.cpu()})

        if index == args_dict['num_samples'] - 1:
            break

    torch.save({'adversarial_examples': adversarial_examples_list,
                'predictions': predictions_list,
                'args_dict': args_dict},
               args_dict['save_file_location'])
Example #18
def get_probabilities(model, x, y):
    with torch.no_grad():
        prediction = predict(model, x.unsqueeze(0))
        prediction_softmax = softmax(prediction, 1)
        prediction_softmax_y = prediction_softmax[0][y]
        return prediction_softmax_y
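A hypothetical usage sketch; model and the class index are placeholders, and the image is unbatched because the function adds the batch dimension itself:

import torch

x = torch.rand(3, 224, 224).cuda()  # placeholder single image, no batch dimension
y = 17                              # placeholder class index of interest
p = get_probabilities(model, x, y)
print(float(p))  # softmax probability the model assigns to class y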
Example #19
results = parser.parse_args()

checkpoint = results.checkpoint
image = results.image_path
top_k = results.topk
gpu_mode = results.gpu
cat_names = results.cat_name_dir

device = torch.device("cpu")
if gpu_mode:
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    
with open(cat_names, 'r') as f:
    cat_to_name = json.load(f)
    

# Load model
loaded_model = load_checkpoint(checkpoint)
loaded_model.to(device)


# Carry out prediction
probs, classes = predict(image, loaded_model, device, top_k)

# Print probabilities and predicted classes
labels = [cat_to_name[c] for c in classes]
for p, c, l in zip(probs, classes, labels):
    print("Probability is {0:2f} for class {1} with corresponding label {2}".format(p, c, l))
Example #20
    def run_train_cv(self):
        oof_preds = np.zeros((len(self.df), self.n_class))
        best_val_loss = 0
        for i_fold, (trn_idx, val_idx) in enumerate(self.fold_indices):
            self.logger.info("-" * 10)
            self.logger.info(f"fold: {i_fold}")
            train_df = self.df.iloc[trn_idx].reset_index(drop=True)
            val_df = self.df.iloc[val_idx].reset_index(drop=True)
            # concat nocall df
            # val_df = pd.concat([val_df, self.nocall_df]).reset_index()
            train_ds = datasets.SpectrogramDataset(
                train_df,
                self.data_dir,
                sample_rate=self.config.sample_rate,
                composer=self.train_composer,
                secondary_label=self.secondary_label,
            )
            valid_ds = datasets.SpectrogramDataset(
                val_df,
                self.data_dir,
                sample_rate=self.config.sample_rate,
                composer=self.val_composer,
                secondary_label=self.secondary_label
            )
            train_dl = torch.utils.data.DataLoader(
                train_ds, shuffle=True, **self.config.dataloader
            )

            # reduce batchsize for avoiding cudnn error
            valid_dl = torch.utils.data.DataLoader(
                valid_ds,
                shuffle=False,
                num_workers=self.config.dataloader.num_workers,
                batch_size=int(self.config.dataloader.batch_size / 2),
                pin_memory=self.config.dataloader.pin_memory,
            )
            model = model_utils.build_model(
                self.config.model.name,
                n_class=self.n_class,
                in_chans=self.config.model.in_chans,
                pretrained=self.config.model.pretrained,
            )
            if self.config.multi and self.config.gpu:
                self.logger.info("Using pararell gpu")
                model = nn.DataParallel(model)

            # criterion = nn.BCELoss()
            criterion = nn.BCEWithLogitsLoss()
            optimizer = optim.Adam(model.parameters(), float(self.config.learning_rate))
            scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, 10)
            best_model_path = self.save_dir / f"best_model_fold{i_fold}.pth"
            if self.config.mixup:
                self.logger.info("use mixup")
            best_val_loss += model_utils.train_model(
                epoch=self.epoch,
                model=model,
                train_loader=train_dl,
                val_loader=valid_dl,
                optimizer=optimizer,
                scheduler=scheduler,
                criterion=criterion,
                device=self.device,
                threshold=self.config.threshold,
                best_model_path=best_model_path,
                logger=self.logger,
                mixup=self.config.mixup,
            )
            model = model_utils.load_pytorch_model(
                model_name=self.config.model.name,
                path=best_model_path,
                n_class=self.n_class,
                in_chans=self.config.model.in_chans,
            )
            preds = model_utils.predict(
                model, valid_dl, self.n_class, self.device, sigmoid=True
            )
            oof_preds[val_idx, :] = preds
        # oof_score = self.metrics(self.y, oof_preds)
        best_val_loss /= len(self.fold_indices)
        return oof_preds, best_val_loss
Example #21

import torch
from torch import nn
from torch import optim

from model_utils import load_checkpoint, predict
from data_utils import load_data, label_mapping, process_image
from argument_parser import get_args_predict

args = get_args_predict()

if (args.device == 'gpu' and torch.cuda.is_available()):
    device = torch.device('cuda')
else:
    device = torch.device('cpu')

loaded_model = load_checkpoint(args.checkpoint, device)

probabilities, classes = predict(args.input, loaded_model, device, args.topk)

idx_to_name = label_mapping(args.json_file)
labels = [idx_to_name[str(i)] for i in classes]

# Print out result
for i in range(args.topk):
    print(
        f"Image is classified as a {labels[i]} with a probability of {round(probabilities[i] * 100, 2)}%"
    )
Example #22
    def normal_loss(self, x, labels):
        predictions = predict(self.model, x)
        loss = self.optimization_direction * self.criterion(predictions, labels)
        return loss
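optimization_direction is not defined in this excerpt; a plausible convention, given the targeted flag used in the other examples, is a sign of +/-1 (this is an assumption, not confirmed by the source):

# Assumed convention: ascend the loss for untargeted attacks,
# descend it toward the target class for targeted ones.
optimization_direction = 1 if not args_dict['targeted'] else -1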
Example #23
def main():
    args_dict = normalize_args_dict(get_args_dict())

    print('Running PGD experiment with the following arguments:')
    print(str(args_dict) + '\n')

    if args_dict['seed'] is not None:
        torch.manual_seed(args_dict['seed'])

    if args_dict['checkpoint_location'] is None:
        model = get_model(arch=args_dict['arch'], pretrained=True, freeze=True, device=args_dict['device']).eval()
    else:
        model = to_device(load_model(location=args_dict['checkpoint_location'],
                                     arch=args_dict['arch'],
                                     from_robustness=args_dict['from_robustness']).eval(),
                          'cuda')

    attacker = Attacker(model, args_dict)

    print('Loading dataset...')
    if args_dict['masks']:
        loader = torch.load(args_dict['dataset'])
    else:
        dataset = load_imagenet(args_dict['dataset'])
        loader, _ = dataset.make_loaders(workers=10, batch_size=args_dict['batch_size'])
    print('Finished!\n')

    mask_batch = 1
    total_num_samples = 0
    adversarial_examples_list = []
    predictions_list = []

    print('Starting PGD...')
    for index, batch in enumerate(loader):
        if args_dict['masks']:
            image_batch, mask_batch = batch
            image_batch.unsqueeze_(0)
            mask_batch.unsqueeze_(0)

            label_batch = torch.argmax(predict(model, to_device(image_batch, args_dict['device'])), dim=1)
            if mask_batch.size() != image_batch.size():  # size() is a method; without parens the test is always True
                mask_batch = 1
            else:
                mask_batch = to_device(mask_batch, device=args_dict['device'])
        else:
            image_batch, label_batch = batch

        image_batch = to_device(image_batch, device=args_dict['device'])
        label_batch = to_device(label_batch, device=args_dict['device'])

        if not args_dict['targeted'] and not args_dict['masks']:
            predicted_label_batch = torch.argmax(predict(model, image_batch), dim=1)
            matching_labels = torch.eq(label_batch, predicted_label_batch)
            num_matching_labels = torch.sum(matching_labels)
            if num_matching_labels == 0:
                continue

            image_batch, label_batch = (image_batch[matching_labels],
                                        label_batch[matching_labels])

            if mask_batch != 1:
                mask_batch = mask_batch[matching_labels]

            targets = label_batch
        else:
            targets = TARGET_CLASS * torch.ones_like(label_batch)

        adversarial_examples = attacker(image_batch, mask_batch, targets, False)
        adversarial_predictions = predict(model, adversarial_examples)

        adversarial_examples_list.append(to_device(adversarial_examples, device='cpu'))
        predictions_list.append({'original': to_device(targets, device='cpu'),
                                 'adversarial': to_device(adversarial_predictions, device='cpu')})

        total_num_samples += image_batch.size(0)
        if total_num_samples >= args_dict['num_samples']:
            break

    args_dict['num_samples'] = total_num_samples
    print('Finished!')

    print('Serializing results...')
    torch.save({'adversarial_examples': adversarial_examples_list,
                'predictions': predictions_list,
                'similarity': ALL_SIMILARITY_COEFFS,
                'args_dict': args_dict},
               args_dict['save_file_location'])
    print('Finished!\n')