def main():
    print('Preparing data')

    if args.dataset == 'cocotext':
        # Similar number of classes as Con-Text (proxy task)
        num_classes = 28
        weight_file = '/tmp-network/user/rsampaio/models/finegrained-classif/context_RMAC_Full_fisher_yolo_phoc_mlb_ep3.0/checkpoint_mlb_3.0_L1_0.7800202876657005.weights'
    else:
        print('Dataset error')
        sys.exit(1)

    embedding_size = get_embedding_size(args.embedding)
    print('Loading Model')
    net = load_model(args, num_classes, embedding_size)
    checkpoint = load_checkpoint(weight_file)
    net.load_state_dict(checkpoint)

    print('Checking CUDA')
    if args.cuda and args.ngpu > 1:
        print('\t* Data Parallel **NOT TESTED**')
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.cuda:
        print('\t* CUDA ENABLED!')
        net = net.cuda()

    print('\n*** TEST ***\n')
    test(args, net, args.cuda)
    print('*** Feature Extraction Completed ***')
    sys.exit()
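The helpers called above (get_embedding_size, load_model, load_checkpoint, test) are defined elsewhere in the repository and are not part of this snippet. As a minimal sketch only, assuming the .weights file stores a plain state_dict, load_checkpoint might look something like this:

import torch

def load_checkpoint(weight_file, use_cuda=None):
    """Hypothetical sketch: load a state_dict, mapping it to CPU when CUDA is
    unavailable and stripping 'module.' prefixes left behind by nn.DataParallel."""
    if use_cuda is None:
        use_cuda = torch.cuda.is_available()
    state_dict = torch.load(weight_file,
                            map_location=None if use_cuda else 'cpu')
    # Some checkpoints wrap the weights in a larger dict.
    if isinstance(state_dict, dict) and 'state_dict' in state_dict:
        state_dict = state_dict['state_dict']
    return {key[len('module.'):] if key.startswith('module.') else key: value
            for key, value in state_dict.items()}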
def main():
    print('Preparing data')

    if args.dataset == 'context':
        num_classes = 28
        weight_file = '/SSD/fine_grained_classification_with_textual_cues/backup/context_orig_fisherNet_fisher_yolo_phoc_concat/checkpoint7988.weights'
    else:
        num_classes = 20
        weight_file = '/SSD/fine_grained_classification_with_textual_cues/backup/bottles_orig_fisherNet_fisher_yolo_phoc_concat/checkpoint7690.weights'

    embedding_size = get_embedding_size(args.embedding)
    print('Loading Model')
    net = load_model(args, num_classes, embedding_size)
    checkpoint = load_checkpoint(weight_file)
    net.load_state_dict(checkpoint)

    print('Checking CUDA')
    if args.cuda and args.ngpu > 1:
        print('\t* Data Parallel **NOT TESTED**')
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.cuda:
        print('\t* CUDA ENABLED!')
        net = net.cuda()

    print('\n*** TEST ***\n')
    test(args, net, args.cuda, num_classes)
    print('*** Feature Extraction Completed ***')
    sys.exit()
Example #3
def keract_visual(model_path, x=None):
    if x is None:
        raise ValueError('x cannot be None.')

    model = load_model(model_path)

    visualizer = vis.Visualizer(model, x)
    visualizer.init_model_activations()
    visualizer.display_model_activations()
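A hypothetical invocation of keract_visual above, assuming a Keras MNIST classifier; the model path and the preprocessing are placeholders, not part of the original snippet:

from tensorflow.keras.datasets import mnist

# Take a single test digit, shaped and scaled the way a typical MNIST CNN expects.
(_, _), (x_test, _) = mnist.load_data()
sample = x_test[:1].reshape(1, 28, 28, 1).astype('float32') / 255.0

keract_visual('data/models/mnist_cnn.h5', x=sample)  # path is illustrative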
def __init__(self, debug=False):
    _, (self.test_x, self.test_y) = load_data('mnist')
    self.debug = debug
    if not debug:
        self.model_names = os.listdir('data/models')
    else:
        self.model_names = os.listdir('data/models')[:2]
    self.model_models = {}
    print("Loading models")
    for tmp in self.model_names:
        self.model_models[tmp] = models.load_model(tmp)
    print("Finished loading models!")
Example #5
def init_candidate_targets(weak_defenses_file):
    """
    :return: the list of candidate targets, which are
            sorted descendingly according to their effectiveness.
    """
    # load all models
    candidates = {}
    waives = get_waive_list()

    with open(os.path.join(PATH.MODEL, weak_defenses_file),
              'r') as weak_defense_list:
        for weak_defense in weak_defense_list:
            weak_defense = weak_defense.rstrip('\n')  # strip the trailing newline
            print('...Loading weak defense {}'.format(weak_defense))
            name = weak_defense.split('.')[0].split('-')[-1]
            if name in waives:
                print('Ignore [{}]'.format(name))
                continue

            candidates[name] = load_model(weak_defense)

    return candidates
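A hedged usage sketch for init_candidate_targets; the list-file name and contents below are illustrative, assuming one model file name per line, with the token after the last '-' (before the extension) identifying the weak defense:

# Hypothetical contents of 'mnist_weak_defenses.list':
#   model-mnist-cnn-clean.h5
#   model-mnist-cnn-rotate90.h5
#   model-mnist-cnn-shift_left.h5
candidates = init_candidate_targets('mnist_weak_defenses.list')
print('Loaded {} weak defenses: {}'.format(len(candidates),
                                           sorted(candidates.keys())))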
Example #6
def main():
    print('Preparing data')

    num_classes = get_num_classes(args)
    embedding_size = get_embedding_size(args.embedding)
    grad_clip = args.grad_clip

    train_data, test_data, gt_annotations, text_embedding = load_data(
        args, embedding_size)

    #sampler = torch.utils.data.sampler.WeightedRandomSampler(weights, len(weights))

    train_loader = DataLoader(train_data,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.prefetch,
                              pin_memory=True)
    test_loader = DataLoader(test_data,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=args.prefetch,
                             pin_memory=True)

    print('Creating Model')
    net = load_model(args, num_classes, embedding_size)

    print('Optimizer: ', args.optim)
    if args.optim == 'sgd':
        optim = torch.optim.SGD(net.parameters(),
                                args.learning_rate,
                                momentum=args.momentum,
                                weight_decay=args.decay,
                                nesterov=True)
    elif args.optim == 'adam':
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        net.parameters()),
                                 args.learning_rate,
                                 betas=(0.9, 0.999),
                                 eps=1e-08,
                                 weight_decay=args.decay)
    elif args.optim == 'radam':
        optim_base = RAdam(filter(lambda p: p.requires_grad, net.parameters()),
                           args.learning_rate)
        optim = Lookahead(optim_base, k=5, alpha=0.5)
    else:
        raise NotImplementedError('Optimizer not implemented: {}'.format(
            args.optim))

    net_params = net.parameters()

    # Weight Tensor for Criterion
    weights = get_weight_criterion(args.dataset)

    # criterion.cuda() below moves these weights to the GPU when CUDA is enabled
    class_weights = torch.FloatTensor(weights)
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    evaluation = None

    print('Checking CUDA')
    if args.cuda and args.ngpu > 1:
        print('\t* Data Parallel **NOT TESTED**')
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.cuda:
        print('\t* CUDA ENABLED!')
        net = net.cuda()
        criterion = criterion.cuda()

    # Init variables
    early_stop_counter, start_epoch, best_perf, best_epoch = 0, 0, 0, 0

    if args.load is not None:
        checkpoint = load_checkpoint(args.load)
        net.load_state_dict(checkpoint)

    if args.test == 'False':

        for epoch in range(start_epoch, args.epochs):
            # Update learning rate
            adjust_learning_rate(optim, epoch)

            print('\n*** TRAIN ***\n')
            loss = train(train_loader, net, optim, args.cuda, criterion, epoch,
                         args.log_interval, num_classes, args.batch_size,
                         net_params, grad_clip)
            print('\n*** TEST ***\n')
            performance = test(test_loader, net, args.cuda, num_classes,
                               args.batch_size, args)

            # Early-Stop + Save model

            if performance > best_perf:
                best_perf = performance
                best_epoch = epoch
                early_stop_counter = 0
                if args.save_weights == 'True':
                    save_checkpoint(net,
                                    best_perf,
                                    directory=args.save,
                                    file_name='checkpoint',
                                    data_weights=args.dataset)
            else:
                if early_stop_counter == args.early_stop:
                    print('\nEarly stop reached!')
                    break
                early_stop_counter += 1

        # Report the best epoch (its checkpoint was saved above when save_weights is enabled)
        print("\nBest Performance is: %f at Epoch No. %d" %
              (best_perf, best_epoch))

    else:
        print('\n*** TEST ***\n')
        performance = test(test_loader, net, args.cuda, num_classes,
                           args.batch_size, args)

    print('*** Process Completed ***')
    sys.exit()
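adjust_learning_rate, get_weight_criterion and the other helpers called above live elsewhere in the repository. Purely as an illustrative sketch of the kind of schedule adjust_learning_rate might implement (a step decay; the policy and hyper-parameters here are assumptions):

def adjust_learning_rate(optimizer, epoch, base_lr=0.1, gamma=0.1, step=30):
    """Hypothetical step decay: scale the base learning rate by `gamma`
    every `step` epochs and write it into every parameter group."""
    lr = base_lr * (gamma ** (epoch // step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr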
Example #7
def train():
    train_loader = generator.Generator(args.dataset_root,
                                       args,
                                       partition='train',
                                       dataset=args.dataset)
    logger.info('Batch size: ' + str(args.batch_size))

    #Try to load models
    enc_nn = models.load_model('enc_nn', args)
    metric_nn = models.load_model('metric_nn', args)

    if enc_nn is None or metric_nn is None:
        enc_nn, metric_nn = models.create_models(args=args)
    softmax_module = models.SoftmaxModule()

    if args.cuda:
        enc_nn.cuda()
        metric_nn.cuda()

    logger.info(str(enc_nn))
    logger.info(str(metric_nn))

    weight_decay = 0
    if args.dataset == 'mini_imagenet':
        logger.info('Weight decay ' + str(1e-6))
        weight_decay = 1e-6
    opt_enc_nn = optim.Adam(enc_nn.parameters(),
                            lr=args.lr,
                            weight_decay=weight_decay)
    opt_metric_nn = optim.Adam(metric_nn.parameters(),
                               lr=args.lr,
                               weight_decay=weight_decay)

    model_summary([enc_nn, metric_nn])
    optimizer_summary([opt_enc_nn, opt_metric_nn])
    enc_nn.train()
    metric_nn.train()
    counter = 0
    total_loss = 0
    val_acc, val_acc_aux = 0, 0
    test_acc = 0
    for batch_idx in range(args.iterations):

        ####################
        # Train
        ####################
        data = train_loader.get_task_batch(
            batch_size=args.batch_size,
            n_way=args.train_N_way,
            unlabeled_extra=args.unlabeled_extra,
            num_shots=args.train_N_shots,
            cuda=args.cuda,
            variable=True)
        [
            batch_x, label_x, _, _, batches_xi, labels_yi, oracles_yi,
            hidden_labels
        ] = data

        opt_enc_nn.zero_grad()
        opt_metric_nn.zero_grad()

        loss_d_metric = train_batch(model=[enc_nn, metric_nn, softmax_module],
                                    data=[
                                        batch_x, label_x, batches_xi,
                                        labels_yi, oracles_yi, hidden_labels
                                    ])

        opt_enc_nn.step()
        opt_metric_nn.step()

        adjust_learning_rate(optimizers=[opt_enc_nn, opt_metric_nn],
                             lr=args.lr,
                             iter=batch_idx)

        ####################
        # Display
        ####################
        counter += 1
        total_loss += loss_d_metric.data[0]
        if batch_idx % args.log_interval == 0:
            display_str = 'Train Iter: {}'.format(batch_idx)
            display_str += '\tLoss_d_metric: {:.6f}'.format(total_loss /
                                                            counter)
            logger.info(display_str)
            counter = 0
            total_loss = 0

        ####################
        # Test
        ####################
        if (batch_idx + 1) % args.test_interval == 0 or batch_idx == 20:
            if batch_idx == 20:
                test_samples = 100
            else:
                test_samples = 3000
            if args.dataset == 'mini_imagenet':
                val_acc_aux = test.test_one_shot(
                    args,
                    model=[enc_nn, metric_nn, softmax_module],
                    test_samples=test_samples * 5,
                    partition='val')
            test_acc_aux = test.test_one_shot(
                args,
                model=[enc_nn, metric_nn, softmax_module],
                test_samples=test_samples * 5,
                partition='test')
            test.test_one_shot(args,
                               model=[enc_nn, metric_nn, softmax_module],
                               test_samples=test_samples,
                               partition='train')
            enc_nn.train()
            metric_nn.train()

            if val_acc_aux is not None and val_acc_aux >= val_acc:
                test_acc = test_acc_aux
                val_acc = val_acc_aux

            if args.dataset == 'mini_imagenet':
                logger.info("Best test accuracy {:.4f} \n".format(test_acc))

        ####################
        # Save model
        ####################
        if (batch_idx + 1) % args.save_interval == 0:
            logger.info("saving model...")
            torch.save(enc_nn,
                       os.path.join(logger.get_logger_dir(), 'enc_nn.t7'))
            torch.save(metric_nn,
                       os.path.join(logger.get_logger_dir(), 'metric_nn.t7'))

    # Test after training
    test.test_one_shot(args,
                       model=[enc_nn, metric_nn, softmax_module],
                       test_samples=args.test_samples)
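train_batch is not shown in this excerpt. A rough sketch of what it could do for this encoder/metric/softmax trio, assuming enc_nn returns the embedding as the last element of its output and metric_nn yields logits for the query (both assumptions; the repository's version may differ):

import numpy as np
import torch
import torch.nn.functional as F

def train_batch(model, data):
    enc_nn, metric_nn, softmax_module = model
    batch_x, label_x, batches_xi, labels_yi, oracles_yi, hidden_labels = data

    # Embed the query image and every support image.
    z = enc_nn(batch_x)[-1]
    zi_s = [enc_nn(batch_xi)[-1] for batch_xi in batches_xi]

    # The metric network scores the query against the support set.
    _, out_logits = metric_nn(inputs=[z, zi_s, labels_yi, oracles_yi,
                                      hidden_labels])
    log_probs = softmax_module.forward(out_logits)

    # One-hot labels -> class indices, then NLL loss and backward pass.
    target = torch.from_numpy(np.argmax(label_x.cpu().data.numpy(), axis=1))
    if label_x.is_cuda:
        target = target.cuda()
    loss = F.nll_loss(log_probs, target)
    loss.backward()
    return loss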
Example #8
def get_adversarial_examples(model_name, attack_method, X, Y, **kwargs):
    # remove the file format
    model_name = model_name.split('.')[0]
    dataset = DATA.CUR_DATASET_NAME

    if ((not os.path.isfile('{}/{}.h5'.format(PATH.MODEL, model_name))) and
        (not os.path.isfile('{}/{}.json'.format(PATH.MODEL, model_name)))):
        raise FileNotFoundError(
            'Could not find target model [{}].'.format(model_name))

    config = tf.ConfigProto(intra_op_parallelism_threads=4,
                            inter_op_parallelism_threads=4)

    sess = tf.Session(config=config)
    keras.backend.set_session(sess)

    model = load_model(model_name)

    logger.info('Crafting adversarial examples using {} method...'.format(
        attack_method.upper()))
    X_adv = None

    if (attack_method == ATTACK.FGSM):
        eps = kwargs.get('eps', 0.25)
        attack_params = {
            'eps': eps,
            'ord': np.inf,
            'clip_min': 0.,
            'clip_max': 1.
        }
        logger.info('{}: (eps={})'.format(attack_method.upper(), eps))

        start_time = time.time()
        X_adv, Y = whitebox.generate_art(model, X, Y, attack_method, dataset,
                                         attack_params)
        # X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset, attack_params)
        duration = time.time() - start_time
        logger.info('Time cost for generation: {}'.format(duration))

    elif (attack_method == ATTACK.BIM):
        # iterative fast gradient method
        eps = kwargs.get('eps', 0.25)
        nb_iter = kwargs.get('nb_iter', 100)
        ord = kwargs.get('ord', np.inf)
        """
        Cleverhans requires an eps_iter that is smaller than the eps. 
        By default, eps_iter=0.05, so, update eps_iter for small epsilons.
        """
        if eps < 0.005:
            eps_iter = 0.001
        elif eps < 0.05:
            eps_iter = 0.005
        else:
            # for big enough eps, use the default setting
            eps_iter = 0.05
        attack_params = {
            'eps': eps,
            'eps_iter': eps_iter,
            'nb_iter': nb_iter,
            'ord': ord,
            'clip_min': 0.,
            'clip_max': 1.
        }

        logger.info('{}: (ord={}, nb_iter={}, eps={})'.format(
            attack_method.upper(), ord, nb_iter, eps))
        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)

        duration = time.time() - start_time
        print('Time cost: {}'.format(duration))
    elif (attack_method == ATTACK.DEEPFOOL):
        # Images for inception classifier are normalized to be in [0, 255] interval.
        # max_iterations = kwargs.get('max_iterations', 100)
        # keras.backend.set_learning_phase(True)
        max_iterations = 50
        ord = kwargs.get('ord', 2)
        overshoot = kwargs.get('overshoot', 0.02)

        attack_params = {
            'ord': ord,
            'max_iterations': max_iterations,
            'nb_candidate': int(Y.shape[1] / 2),
            'overshoot': overshoot,
            'clip_min': 0.,
            'clip_max': 255.
        }

        print(attack_params)
        logger.info('{}: (max_iterations={})'.format(attack_method.upper(),
                                                     max_iterations))

        X *= 255.
        Y *= 255

        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)

        X /= 255.
        Y /= 255.
        X_adv /= 255.

        duration = time.time() - start_time
        print('Time cost: {}'.format(duration))

    elif (attack_method == ATTACK.CW_L2):
        ord = 2
        binary_search_steps = kwargs.get('binary_search_steps', 10)
        batch_size = kwargs.get('cw_batch_size', 2)
        initial_const = kwargs.get('initial_const', 10)
        learning_rate = kwargs.get('learning_rate', 0.1)
        max_iterations = kwargs.get('max_iterations', 100)

        attack_params = {
            'batch_size': batch_size,
            'binary_search_steps': binary_search_steps,
            'initial_const': initial_const,
            'learning_rate': learning_rate,
            'max_iterations': max_iterations,
            'clip_min': 0.,
            'clip_max': 1.
        }

        logger.info('{}: (ord={}, max_iterations={})'.format(
            attack_method.upper(), ord, max_iterations))

        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)
        duration = time.time() - start_time
        logger.info('Time cost: {}'.format(duration))

    elif (attack_method == ATTACK.CW_Linf):
        ord = np.inf

        decrease_factor = kwargs.get('decrease_factor', 0.9)
        initial_const = kwargs.get('initial_const', 1e-5)
        learning_rate = kwargs.get('learning_rate', 0.1)
        largest_const = kwargs.get('largest_const', 2e+1)
        max_iterations = kwargs.get('max_iterations', 1000)
        reduce_const = False
        const_factor = 3.0

        attack_params = {
            # 'descrease_factor': decrease_factor,
            'initial_const': initial_const,
            'learning_rate': learning_rate,
            'max_iterations': max_iterations,
            'largest_const': largest_const,
            'reduce_const': reduce_const,
            'const_factor': const_factor,
            'clip_min': 0.,
            'clip_max': 1.
        }

        logger.info('{}: (ord={}, max_iterations={})'.format(
            attack_method.upper(), ord, max_iterations))

        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)
        duration = time.time() - start_time
        logger.info('Time cost: {}'.format(duration))

    elif (attack_method == ATTACK.CW_L0):
        max_iterations = kwargs.get('max_iterations', 1000)
        initial_const = kwargs.get('initial_const', 10)
        largest_const = kwargs.get('largest_const', 15)

        attack_params = {
            'max_iterations': max_iterations,
            'initial_const': initial_const,
            'largest_const': largest_const
        }

        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method,
                                     attack_params)

    elif (attack_method == ATTACK.JSMA):
        theta = kwargs.get('theta', 0.6)
        gamma = kwargs.get('gamma', 0.5)
        attack_params = {
            'theta': theta,
            'gamma': gamma,
            'clip_min': 0.,
            'clip_max': 1.
        }

        logger.info('{}: (theta={}, gamma={})'.format(attack_method.upper(),
                                                      theta, gamma))
        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)
        duration = time.time() - start_time
        logger.info('Time cost: {}'.format(duration))

    elif (attack_method == ATTACK.PGD):
        eps = kwargs.get('eps', 0.3)
        nb_iter = kwargs.get('nb_iter', 40)
        eps_iter = kwargs.get('eps_iter', 0.01)

        attack_params = {
            'eps': eps,
            'clip_min': 0.,
            'clip_max': 1.,
            'nb_iter': nb_iter,
            'eps_iter': eps_iter
        }

        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)
        duration = time.time() - start_time
        logger.info('Time cost: {}'.format(duration))

    elif (attack_method == ATTACK.ONE_PIXEL):
        # one-pixel was implemented separately.
        targeted = kwargs.get('targeted', False)
        pixel_counts = kwargs.get('pixel_counts', 3)
        max_iter = kwargs.get('max_iter', 10)
        pop_size = kwargs.get('pop_size', 10)

        attack_params = {
            'targeted': targeted,
            'pixel_counts': pixel_counts,
            'max_iter': max_iter,
            'pop_size': pop_size,
            'clip_min': 0.,
            'clip_max': 1.,
        }

        start_time = time.monotonic()
        X_adv, Y = one_pixel.generate(model, X, Y, attack_params)
        duration = time.monotonic() - start_time
        logger.info('Time cost: {}'.format(duration))

    elif (attack_method == ATTACK.MIM):
        eps = kwargs.get('eps', 0.3)
        eps_iter = kwargs.get('eps_iter', 0.06)
        nb_iter = kwargs.get('nb_iter', 10)
        decay_factor = kwargs.get('decay_factor', 0.5)
        y_target = kwargs.get('y_target', None)

        attack_params = {
            'eps': eps,
            'eps_iter': eps_iter,
            'nb_iter': nb_iter,
            'ord': np.inf,
            'decay_factor': decay_factor,
            'y_target': y_target,
            'clip_min': 0.,
            'clip_max': 1.
        }

        start_time = time.time()
        X_adv, Y = whitebox.generate(sess, model, X, Y, attack_method, dataset,
                                     attack_params)
        duration = time.time() - start_time
        logger.info('Time cost: {}'.format(duration))

    print('*** SHAPE: {}'.format(np.asarray(X_adv).shape))

    del model
    sess.close()

    return X_adv, Y
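A hypothetical call to get_adversarial_examples; the model file name is illustrative (it is assumed to exist under PATH.MODEL) and load_data refers to the project's own loader used elsewhere in these examples:

# Craft FGSM adversarial examples (eps=0.1) for the first 100 test samples.
_, (X_test, Y_test) = load_data('mnist')
X_adv, Y_adv = get_adversarial_examples('model-mnist-cnn-clean.h5',
                                        ATTACK.FGSM,
                                        X_test[:100], Y_test[:100],
                                        eps=0.1)
print('Adversarial batch shape:', np.asarray(X_adv).shape)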
Example #9
def main():
    print('Preparing data')
    data_path = args.base_dir

    if args.dataset == 'context':
        num_classes = 28
        weight_file = '/SSD/GCN_classification/best/context_fullGCN_bboxes_fasttext_google_ocr_concat_mean_split1/checkpoint_context.weights'

        with open(data_path + '/Context/data/split_1.json', 'r') as fp:
            gt_annotations = json.load(fp)
        with open(
                data_path + '/Context/' + args.ocr +
                '/text_embeddings/Context_' + args.embedding + '.json',
                'r') as fp:
            text_embedding = json.load(fp)

        # Load Local features from Faster R-CNN VG
        with open(data_path + '/Context/context_local_feats.npy', 'rb') as fp:
            local_feats = np.load(fp, encoding='bytes')

            # Create img_name to index of local features
        with open(data_path + '/Context/context_local_feats_image_ids.txt',
                  'r') as fp:
            image_ids = fp.readlines()
        image_name2features_index = {}
        for item in image_ids:
            img_name = item.strip().split(',')[0].split('/')[-1].replace(
                '\'', '')
            idx = item.strip().split(',')[1].replace(')', '').replace(' ', '')
            image_name2features_index[img_name] = idx

        # BBOXES LOADING FOR TEXT FEATURES
        # Load BBOXES of Scene Text
        with open(data_path + '/Context/google_ocr/bboxes/Context_bboxes.json',
                  'r') as fp:
            text_bboxes = json.load(fp)

        # Load BBOXES of Local Visual Features
        with open(data_path + '/Context/context_bboxes.npy', 'rb') as fp:
            local_bboxes = np.load(fp, encoding='bytes')

    else:
        num_classes = 20
        weight_file = '/SSD/GCN_classification/best/bottles_fullGCN_bboxes_fasttext_google_ocr_concat_mean_split2/checkpoint_bottles.weights'

        with open(data_path + '/Drink_Bottle/split_2.json', 'r') as fp:
            gt_annotations = json.load(fp)
        with open(
                data_path + '/Drink_Bottle/' + args.ocr +
                '/text_embeddings/Drink_Bottle_' + args.embedding + '.json',
                'r') as fp:
            text_embedding = json.load(fp)

        # Load Local features from Faster R-CNN VG
        with open(data_path + '/Drink_Bottle/bottles_local_feats.npy',
                  'rb') as fp:
            local_feats = np.load(fp, encoding='bytes')

            # Create img_name to index of local features
        with open(
                data_path + '/Drink_Bottle/bottles_local_feats_image_ids.txt',
                'r') as fp:
            image_ids = fp.readlines()
        image_name2features_index = {}
        for item in image_ids:
            # Sample: ('/SSD/Datasets/Drink_Bottle/images/14/982.jpg', 0)
            img_name = item.strip().split(',')[0].replace('\'',
                                                          '').split('/')[-3:]
            img_name = img_name[0] + '/' + img_name[1] + '/' + img_name[2]
            idx = item.strip().split(',')[1].replace(')', '').replace(' ', '')
            image_name2features_index[img_name] = idx

        # BBOXES LOADING FOR TEXT FEATURES
        # Load BBOXES of Scene Text
        with open(
                data_path +
                '/Drink_Bottle/google_ocr/bboxes/Drink_Bottle_bboxes.json',
                'r') as fp:
            text_bboxes = json.load(fp)

        # Load BBOXES of Local Visual Features
        with open(data_path + '/Drink_Bottle/bottles_bboxes.npy', 'rb') as fp:
            local_bboxes = np.load(fp, encoding='bytes')

    embedding_size = get_embedding_size(args.embedding)
    print('Loading Model')
    net = load_model(args, num_classes, embedding_size)
    checkpoint = load_checkpoint(weight_file)
    net.load_state_dict(checkpoint)

    print('Checking CUDA')
    if args.cuda and args.ngpu > 1:
        print('\t* Data Parallel **NOT TESTED**')
        net = torch.nn.DataParallel(net, device_ids=list(range(args.ngpu)))

    if args.cuda:
        print('\t* CUDA ENABLED!')
        net = net.cuda()

    print('\n*** TEST ***\n')
    test(args, net, args.cuda, num_classes, gt_annotations, text_embedding,
         local_feats, image_name2features_index, text_bboxes, local_bboxes)
    print('*** Feature Extraction Completed ***')
    sys.exit()
Example #10
def gen_greedy(dataset,
               attacker=ATTACK.FGSM,
               attack_count=None,
               strategy=ATTACK_STRATEGY.RANDOM.value):
    config = tf.ConfigProto(intra_op_parallelism_threads=4,
                            inter_op_parallelism_threads=4)
    sess = tf.Session(config=config)
    keras.backend.set_session(sess)

    # candidates = init_candidate_targets('ensemble/mnist_weak_defenses_fsgm.list')
    model_names = os.listdir('data/models')
    candidates = {}

    if MODE.DEBUG:
        tmp = model_names[:2]
    else:
        tmp = model_names
    for name in tmp:
        candidates[name.split('.')[0]] = load_model(name)

    print('...In total {} weak defenses.'.format(len(candidates)))

    prefix = 'wb'  # white-box

    if attack_count is None or attack_count <= 0:
        prefix = 'gb'  # gray-box
        attack_count = len(candidates.keys())

    X_adv = []
    _, (X, Y) = load_data(dataset=dataset)

    # generate 500 samples
    batch_size = 100
    nb_samples = Y.shape[0]
    nb_iter = int(nb_samples / batch_size)

    if MODE.DEBUG:
        X = X[:5]
        Y = Y[:5]
        batch_size = 1

    start = time.monotonic()
    for i in range(nb_iter):
        start_idx = i * batch_size
        end_idx = min((i + 1) * batch_size, nb_samples)
        print(start_idx, end_idx)
        X_batch = X[start_idx:end_idx]
        Y_batch = Y[start_idx:end_idx]

        print('...In total {} inputs.'.format(Y.shape[0]))
        idx = 0
        for x, y in zip(X_batch, Y_batch):
            print('{}-th input...'.format(idx))

            x = np.expand_dims(x, axis=0)

            strategy = ATTACK_STRATEGY.RANDOM.value
            '''
            generate_single(sess, x, y, attacker=ATTACK.FGSM,
                        candidates=None,
                        attack_count=None,
                        max_perturb=get_perturb_upperbound(),
                        strategy=ATTACK_STRATEGY.RANDOM.value)
            '''
            start_sample = time.monotonic()
            X_adv.append(
                generate_single(sess,
                                x,
                                y,
                                attacker,
                                candidates,
                                attack_count,
                                strategy=strategy))
            end_sample = time.monotonic()
            print('({}, {})-th sample: {}\n\n'.format(
                i, idx, (end_sample - start_sample)))
            idx += 1

        save_adv_examples(np.asarray(X_adv),
                          prefix=prefix,
                          bs_samples=X_batch,
                          dataset=dataset,
                          transformation=strategy,
                          attack_method=attacker,
                          attack_params='eps100_batchsize{}_{}'.format(
                              batch_size, i))

    duration = time.monotonic() - start
    print('----------------------------------')
    print('        Summary')
    print('----------------------------------')
    print('Number of inputs:', Y.shape[0])
    print('Adversary:', attacker)
    print('Strategy:', strategy)
    print('Time cost:', duration)

    sess.close()
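A hedged example of launching the greedy ensemble attack above; the dataset name follows the other examples, and using the gray-box setting is an assumption:

# attack_count=None switches to the gray-box prefix and targets every
# weak defense found under data/models.
gen_greedy('mnist', attacker=ATTACK.FGSM, attack_count=None,
           strategy=ATTACK_STRATEGY.RANDOM.value)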
Example #11
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
# To suppress the TF warning about the binary not being compiled with SSE4.2
# 0 = all logs, 1 = info, 2 = warnings, 3 = error

# Arguments
###############################################################################
try:
    args = get_args()
    config = process_config(args.config)
except:
    logging.error("Missing or invalid arguments.")
    exit(0)

# Loading model
##############################################################################
model_formicID = load_model(config=config, num_species=97)
model_formicID = compile_model(model=model_formicID, config=config)
model_formicID = weights_load(
    model=model_formicID,
    weights=
    "experiments/T97_CaAll_QuM_ShP_AugM_D05_LR0001_E200_I4_def_clean/checkpoint/weights_76-1.83.hdf5",
)

# predicting
##############################################################################
Y_true, Y_pred, labels, species_dict = predictor(
    model=model_formicID,
    config=config,
    species_json="data/species_dict.json",
    plot=True,
    n_img=10,
)
Example #12
def main():
    # Arguments
    ###########################################################################
    try:
        args = get_args()
        config = process_config(args.config)
    except:
        logging.error("Missing or invalid arguments.")
        exit(0)

    # Logging
    ###########################################################################
    logging.basicConfig(
        filename=os.path.join("logs", config.exp_name + ".log"),
        format="[%(asctime)s] - [%(levelname)s]: %(message)s",
        filemode="a",
        level=logging.DEBUG,
    )
    logging.info("Logging started.")
    logging.info("Keras version: {}".format(keras_version))

    # Session
    ###########################################################################
    sess = tf.Session()
    K.set_session(sess)

    # create experiment related directories
    ###########################################################################
    create_dirs([config.summary_dir, config.checkpoint_dir])

    # Initialize the model
    ###########################################################################
    model_formicID = load_model(config=config, num_species=97)
    model_formicID = compile_model(model=model_formicID, config=config)
    model_formicID = weights_load(
        model=model_formicID,
        weights=
        "experiments/T97_CaAll_QuM_ShSti_AugM_D05_LR0001_E200_I4_def_clean/checkpoint/weights_55-1.76.hdf5",
    )

    # Training in batches with iterator
    ###########################################################################
    history = trainer_dir(
        model=model_formicID,
        config=config,
        callbacks=build_logger(config=config, model=model_formicID),
    )
    save_model(model=model_formicID,
               filename="final_weights.hdf5",
               config=config)

    # Evaluation
    ###########################################################################
    plot_history(history=history, config=config, theme="ggplot", save=None)
    evaluator(model=model_formicID, config=config, test_dir=None)

    # Testing
    ###########################################################################
    Y_true, Y_pred, labels, species_dict = predictor(
        model=model_formicID,
        config=config,
        # species_json="data/species_dict.json",
        plot=True,
        n_img=10,
        n_cols=3,
    )
    predictor_reports(
        Y_true=Y_true,
        Y_pred=Y_pred,
        config=config,
        species_dict=species_dict,
        target_names=labels,
        digits=5,
    )
    plot_confusion_matrix(
        Y_pred=Y_pred,
        Y_true=Y_true,
        config=config,
        target_names=labels,
        species_dict=species_dict,
        title=None,
        cmap="viridis",
        normalize=True,
        scores=True,
        score_size=8,
        save="confusion_matrix.png",
    )
    # Footer
    ###########################################################################
    K.clear_session()
    logging.info("Logging ended.")
Example #13
def train():
    """Main function used for training for model. Keeps iterating and updating parameters until early stop condition is reached."""

    # The generator is used to sample batches.
    train_loader = generator.Generator(args.dataset_root,
                                       args,
                                       partition='train',
                                       dataset=args.dataset)

    io.cprint('Batch size: ' + str(args.batch_size))
    print("Learning rate is " + str(args.lr))

    #Try to load models
    enc_nn = models.load_model('enc_nn', args, io)
    metric_nn = models.load_model('metric_nn', args, io)

    # Create the models if none could be loaded
    if enc_nn is None or metric_nn is None:
        enc_nn, metric_nn = models.create_models(args, train_loader)

    softmax_module = models.SoftmaxModule()
    if args.cuda:
        enc_nn.cuda()
        metric_nn.cuda()

    io.cprint(str(enc_nn))
    io.cprint(str(metric_nn))

    weight_decay = 0
    if args.dataset == 'sensor':
        print('Weight decay ' + str(1e-6))
        weight_decay = 1e-6

    opt_enc_nn = optim.Adam(filter(lambda p: p.requires_grad,
                                   enc_nn.parameters()),
                            lr=args.lr,
                            weight_decay=weight_decay)
    opt_metric_nn = optim.Adam(metric_nn.parameters(),
                               lr=args.lr,
                               weight_decay=weight_decay)

    enc_nn.train()
    metric_nn.train()
    counter = 0
    total_loss = 0
    test_cycle = 0
    batch_idx = 0

    start = time.time()
    print("starting time count")
    e_stop = early_stop.EarlyStopping()

    #Start training loop
    while e_stop.early_stop is False:
        ####################
        # Train
        ####################
        #Load training batch
        data, _ = train_loader.get_task_batch(batch_size=args.batch_size,
                                              cuda=args.cuda,
                                              variable=True)

        [batch_x, label_x, _, _, batches_xi, labels_yi] = data

        opt_enc_nn.zero_grad()
        opt_metric_nn.zero_grad()

        #Calculate loss
        loss_d_metric = train_batch(
            model=[enc_nn, metric_nn, softmax_module],
            data=[batch_x, label_x, batches_xi, labels_yi])
        # Update parameters
        opt_enc_nn.step()
        opt_metric_nn.step()

        #Adjust learning rate
        adjust_learning_rate(optimizers=[opt_enc_nn, opt_metric_nn],
                             lr=args.lr,
                             iter=batch_idx)

        ####################
        # Display
        ####################
        counter += 1
        total_loss += loss_d_metric.item()
        if batch_idx % args.log_interval == 0:
            display_str = 'Train Iter: {}'.format(batch_idx)
            display_str += '\tLoss_d_metric: {:.6f}'.format(total_loss /
                                                            counter)
            io.cprint(display_str)
            counter = 0
            total_loss = 0

        ####################
        # Test
        ####################
        # Test at specific intervals
        if (batch_idx + 1) % args.test_interval == 0 or batch_idx == 0:
            if batch_idx == 20:
                test_samples = 200
            else:
                test_samples = 300

            e_stop = test.test_one_shot(
                e_stop,
                test_cycle,
                args,
                model=[enc_nn, metric_nn, softmax_module],
                test_samples=test_samples,
                partition='val')

            enc_nn.train()
            metric_nn.train()

            test_cycle = test_cycle + 1

            end = time.time()
            io.cprint("Time elapsed : " + str(end - start))
            print("Time elapsed : " + str(end - start))

        ####################
        # Save model
        ####################
        #Save model at specific interval
        if (batch_idx + 1) % args.save_interval == 0:
            torch.save(enc_nn,
                       'checkpoints/%s/models/enc_nn.t7' % args.exp_name)
            torch.save(metric_nn,
                       'checkpoints/%s/models/metric_nn.t7' % args.exp_name)

        batch_idx = batch_idx + 1

    #Test after training
    #Load best model
    final_enc_nn = models.load_best_model('enc_nn', io)
    final_metric_nn = models.load_best_model('metric_nn', io)

    final_enc_nn.cuda()
    final_metric_nn.cuda()

    test.test_one_shot(e_stop,
                       test_cycle,
                       args,
                       model=[final_enc_nn, final_metric_nn, softmax_module],
                       test_samples=args.test_samples,
                       partition='test')
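early_stop.EarlyStopping and test.test_one_shot come from the surrounding project and are not shown here. As a minimal sketch of the contract this loop relies on, assuming the stopper tracks the best validation accuracy and raises its flag after a fixed number of evaluations without improvement (the patience value is an assumption):

class EarlyStopping:
    """Hypothetical stand-in for early_stop.EarlyStopping."""

    def __init__(self, patience=10):
        self.patience = patience
        self.best_acc = 0.0
        self.num_bad_cycles = 0
        self.early_stop = False

    def update(self, val_acc):
        # Reset the counter on improvement, otherwise count towards the limit.
        if val_acc > self.best_acc:
            self.best_acc = val_acc
            self.num_bad_cycles = 0
        else:
            self.num_bad_cycles += 1
            if self.num_bad_cycles >= self.patience:
                self.early_stop = True
        return self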