Esempio n. 1
0
def test(args):
    """Evaluate a checkpointed filter-classification model on the test set.

    Loads the model named by ``args.model``, restores weights from
    ``args.ckpt_path``, and streams the FilterDataset test split through the
    model while logging each iteration via TestLogger.

    Args:
        args: parsed command-line namespace; must provide model, gpu_ids,
            ckpt_path, device, batch_size and num_workers. ``start_epoch``
            is written back onto it from the checkpoint.
    """
    # BUG FIX: the original referenced `args_`, but the parameter is `args`
    # (would raise NameError unless a same-named global happened to exist).
    model_fn = model_dict[args.model]
    # 'fd' is a single-logit (binary) model; every other entry is 10-way.
    model = model_fn(num_classes=1 if args.model == 'fd' else 10)
    model = nn.DataParallel(model, args.gpu_ids)

    ckpt_info = ModelSaver.load_model(args.ckpt_path, model)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    test_set = FilterDataset('alexnet', './filters', is_training=False)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.num_workers)
    logger = TestLogger(args)

    logger.start_epoch()
    for inputs, labels in test_loader:
        logger.start_iter()

        # Inference only: the original enabled gradients here, which wastes
        # memory building an autograd graph that is never backpropagated.
        with torch.no_grad():
            logits = model(inputs.to(args.device))

        logger.end_iter(inputs, labels, logits)
    logger.end_epoch()
Esempio n. 2
0
    def setUp(self):
        """Build an authenticated LendingClub client against the local mock API."""
        self.logger = TestLogger()
        self.lc = LendingClub(logger=self.logger)

        # Point the HTTP layer at the mock server and silence its own logging.
        session = self.lc.session
        session.base_url = 'http://127.0.0.1:8000/'
        session.set_logger(None)

        self.lc.authenticate('*****@*****.**', 'supersecret')
Esempio n. 3
0
    def setUp(self):
        """Authenticate against the local mock server and reset session state."""
        self.logger = TestLogger()
        self.lc = LendingClub(logger=self.logger)

        # Route all HTTP traffic to the mock server; disable the session's logger.
        session = self.lc.session
        session.base_url = 'http://127.0.0.1:8000/'
        session.set_logger(None)

        self.lc.authenticate('*****@*****.**', 'supersecret')

        # Make sure the server-side session is enabled, then wiped clean.
        session.post('/session/enabled')
        session.request('delete', '/session')
Esempio n. 4
0
    def setUp(self):
        """Prepare a filter, an authenticated client, and the sample loan list."""
        self.filters = Filter()
        self.filters['exclude_existing'] = False
        self.logger = TestLogger()

        self.lc = LendingClub(logger=self.logger)
        session = self.lc.session
        session.base_url = 'http://127.0.0.1:8000/'
        session.set_logger(None)
        self.lc.authenticate('*****@*****.**', 'supersecret')

        # Fetch the canned loan fractions used by the filter-validation tests.
        json_response = session.get('/filter_validation', query={'id': 1}).json()
        self.loan_list = json_response['loanFractions']
Esempio n. 5
0
def test(args):
    """Restore a checkpoint and run a single evaluation pass over one phase."""
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    # One evaluation over the requested phase, no shuffling or augmentation.
    loader = WhiteboardLoader(args.data_dir, args.phase, args.batch_size,
                              shuffle=False, do_augment=False,
                              num_workers=args.num_workers)
    logger = TestLogger(args, len(loader.dataset))
    logger.start_epoch()
    evaluator = ModelEvaluator([loader], logger,
                               num_visuals=args.num_visuals,
                               prob_threshold=args.prob_threshold)
    logger.end_epoch(evaluator.evaluate(model, args.device, logger.epoch))
Esempio n. 6
0
def test(args):
    """Run a saved model over the test loader and dump per-window statistics.

    Writes one CSV row (gender, age, tte, is_alive, predicted mu, predicted s2)
    per input window to ``<args.results_dir>/test_stats.csv``.

    Args:
        args: namespace with ckpt_path, gpu_ids, device, phase, results_dir
            and loader settings. ``start_epoch`` is written back from the
            checkpoint.
    """
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    data_loader = get_loader(args, phase=args.phase, is_training=False)
    # Constructed for its setup side effects; not otherwise used below.
    logger = TestLogger(args, len(data_loader.dataset))

    # Get model outputs, log to TensorBoard, write masks to disk window-by-window
    util.print_err('Writing model outputs to {}...'.format(args.results_dir))

    all_gender = []
    all_age = []
    all_tte = []
    all_is_alive = []
    all_mu = []
    all_s2 = []
    with tqdm(total=len(data_loader.dataset), unit=' windows') as progress_bar:
        # The original enumerate() index was never used.
        for src, tgt in data_loader:
            all_gender.extend([int(x) for x in src[:, 0]])
            all_age.extend([float(x) for x in src[:, 1]])
            all_tte.extend([float(x) for x in tgt[:, 0]])
            all_is_alive.extend([int(x) for x in tgt[:, 1]])
            with torch.no_grad():
                pred_params = model.forward(src.to(args.device))
                outputs = pred_params.cpu().numpy()
                all_mu.extend([float(x) for x in outputs[:, 0]])
                all_s2.extend([float(x) for x in outputs[:, 1]])

            progress_bar.update(src.size(0))

    # FIX: use a context manager so the CSV handle is closed even if a write
    # raises (the original open()/close() pair leaked the handle on error).
    with open(args.results_dir + '/test_stats.csv', 'w') as fd:
        fd.write('gender, age, tte, is_alive, mu, s2\n')
        for gender, age, tte, is_alive, mu, s2 \
                in zip(all_gender, all_age, all_tte, all_is_alive, all_mu, all_s2):
            fd.write('%d, %f, %f, %d, %f, %f\n' %
                     (gender, age, tte, is_alive, mu, s2))
Esempio n. 7
0
    def setUp(self):
        """Authenticate, reset the mock session, and open a fresh order."""
        self.logger = TestLogger()
        self.lc = LendingClub(logger=self.logger)

        session = self.lc.session
        session.base_url = 'http://127.0.0.1:8000/'
        session.set_logger(None)

        self.lc.authenticate('*****@*****.**', 'supersecret')

        # Enable, then wipe, the server-side session so each test starts clean.
        session.post('/session/enabled')
        session.request('delete', '/session')

        # Ask the mock server for version 3 of browseNotesAj.json.
        session.post('/session', data={'browseNotesAj': '3'})

        # Open the order under test.
        self.order = self.lc.start_order()
Esempio n. 8
0
def life_cycle_stack(stack_name,template, parameters, camVariables, delete_failed_deployment, delete=True, statsd=None):
    '''
    Deploy the template and wait until it finishes successfully.
    Destroy the stack after the deployment finished.
    '''
    # NOTE(review): Python 2 only — `except AuthException, ex` below is not
    # valid Python 3 syntax. This function also appears truncated here:
    # `result` and `delete_deployment` are built but never used in the
    # visible body, and the teardown promised by the docstring is missing.
    iaas = IaaS()
    result={}
    result['name'] = stack_name
    stack = None
    logger = TestLogger(__name__)
    # Deletion defaults on; delete=False keeps the deployed stack around.
    delete_deployment = True
    if not delete:
        delete_deployment = False

    try:
        stack = iaas.deploy(stack_name,template, parameters, camVariables)
        iaas.waitForSuccess(stack, _WORKER_TIME_SEC)
    except AuthException, ex:
        # Auth failures are logged, stack reference dropped, and re-raised to
        # the caller (presumably so it can re-authenticate and retry).
        logger.warning('Authentication Error, re-authenticating\n%s' %ex)
        stack = None
        raise ex
Esempio n. 9
0
def test(args):
    """Evaluate a checkpointed CIFAR classifier on the test split.

    Args:
        args: namespace providing model, num_classes, gpu_ids, ckpt_path,
            device, batch_size and num_workers. ``start_epoch`` is written
            back from the checkpoint.
    """
    # BUG FIX: the original read `args_.model`, but the parameter is `args`
    # (NameError unless a same-named global happened to exist).
    model_fn = models.__dict__[args.model]
    model = model_fn(args.num_classes)
    model = nn.DataParallel(model, args.gpu_ids)

    ckpt_info = ModelSaver.load_model(args.ckpt_path, model)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    _, test_loader, _ = get_cifar_loaders(args.batch_size, args.num_workers)
    logger = TestLogger(args)

    logger.start_epoch()
    for inputs, labels in test_loader:
        logger.start_iter()

        # Inference only: the original enabled gradients, needlessly building
        # an autograd graph that is never backpropagated.
        with torch.no_grad():
            logits = model(inputs.to(args.device))

        logger.end_iter(inputs, labels, logits)
    logger.end_epoch()
def test(args):
    """Evaluate a CT model checkpoint and report study-level PE metrics.

    Runs the model over every window, aggregates per-slice probabilities into
    a per-study maximum, pickles the predictions, and prints AUROC plus a
    confusion matrix against the RSNA train.csv labels.

    Args:
        args: namespace providing ckpt_path, gpu_ids, device, phase,
            visualize_all and results_dir. ``start_epoch`` is written back
            from the checkpoint.
    """
    print("Stage 1")
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    print("Stage 2")
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    print("Stage 3")
    model.eval()
    print('This should be false: {}'.format(model.training))
    print("Stage 4")
    data_loader = CTDataLoader(args, phase=args.phase, is_training=False)
    study2slices = defaultdict(list)
    study2probs = defaultdict(list)
    study2labels = {}
    logger = TestLogger(args, len(data_loader.dataset), data_loader.dataset.pixel_dict)
    print("Stage 5")

    # FIX: context manager so the pickle handle is closed even if loading
    # fails (the original open() was never closed).
    with open('/projectnb/ece601/kaggle-pulmonary-embolism/meganmp/train/series_list.pkl', 'rb') as f:
        data_labels = pickle.load(f)

    # Map study number -> ground-truth positive flag.
    label_dict = {x.study_num: x.is_positive for x in data_labels}

    for key, val in label_dict.items():
        print('label_dict={}\t{}'.format(key, val))

    # Get model outputs, log to TensorBoard, write masks to disk window-by-window
    util.print_err('Writing model outputs to {}...'.format(args.results_dir))
    with tqdm(total=len(data_loader.dataset), unit=' windows') as progress_bar:
        for i, (inputs, targets_dict) in enumerate(data_loader):
            with torch.no_grad():
                cls_logits = model.forward(inputs.to(args.device))
                cls_probs = torch.sigmoid(cls_logits)

            if args.visualize_all:
                logger.visualize(inputs, cls_logits, targets_dict=None, phase=args.phase, unique_id=i)

            max_probs = cls_probs.to('cpu').numpy()
            for study_num, slice_idx, prob in \
                    zip(targets_dict['study_num'], targets_dict['slice_idx'], list(max_probs)):
                # study_num is used as-is as the aggregation key (the
                # original's `study_num = study_num` no-op was dropped).
                slice_idx = int(slice_idx)

                # Save series num for aggregation
                study2slices[study_num].append(slice_idx)
                study2probs[study_num].append(prob.item())

                # Kept for its lookup: raises early if the study is unknown
                # to the loader. The returned series itself is unused.
                series = data_loader.get_series(study_num)
                if study_num not in study2labels:
                    print('study_num={}'.format(study_num))
                    print('series.is_positive={}'.format(label_dict[study_num]))
                    study2labels[study_num] = label_dict[study_num]

            progress_bar.update(inputs.size(0))

    print('study2labels={}'.format(study2labels))

    # Combine masks
    util.print_err('Combining masks...')
    max_probs = []
    labels = []
    predictions = {}
    print("Get max prob")
    for study_num in tqdm(study2slices):

        # Sort by slice index and get max probability
        slice_list, prob_list = (list(t) for t in zip(*sorted(zip(study2slices[study_num], study2probs[study_num]),
                                                              key=lambda slice_and_prob: slice_and_prob[0])))
        study2slices[study_num] = slice_list
        study2probs[study_num] = prob_list
        max_prob = max(prob_list)
        print('study={}\tmax_prob={}'.format(study_num, max_prob))
        max_probs.append(max_prob)
        label = study2labels[study_num]
        labels.append(label)
        predictions[study_num] = {'label': label, 'pred': max_prob}

    # Save predictions to file, indexed by study number
    print("Saving predictions to pickle files")
    with open('{}/preds.pickle'.format(args.results_dir), "wb") as fp:
        pickle.dump(predictions, fp)

    results_series = list(predictions.keys())
    results_pred = [v['pred'] for v in predictions.values()]
    results_label = [v['label'] for v in predictions.values()]
    print('roc_auc_score={}'.format(roc_auc_score(results_label, results_pred)))

    # Create dataframe summary against the official RSNA training labels.
    TRAIN_CSV = '/projectnb/ece601/kaggle-pulmonary-embolism/rsna-str-pulmonary-embolism-detection/train.csv'
    train_df = pd.read_csv(TRAIN_CSV)
    train_df = train_df[['SeriesInstanceUID', 'negative_exam_for_pe']]
    train_df = train_df.groupby('SeriesInstanceUID').aggregate(list)
    # A series is PE-positive (1) unless every row flags it negative.
    train_df['pe_label'] = train_df['negative_exam_for_pe'].apply(lambda x: 0 if 1 in x else 1)

    results_dict = {
        'series': results_series,
        'pred': results_pred
    }
    results_df = pd.DataFrame.from_dict(results_dict)

    results_df = results_df.set_index('series')
    results_df = results_df.join(train_df, how='left').reset_index().rename({'index': 'series'})
    print('roc_auc_score={}'.format(roc_auc_score(results_df['pe_label'], results_df['pred'])))

    # Calculate confusion matrix at a 0.5 decision threshold.
    results_df['interpretation'] = results_df['pred'].apply(lambda x: 0 if x < 0.5 else 1)
    print(results_df.head(10))
    tn, fp, fn, tp = confusion_matrix(results_df['pe_label'], results_df['interpretation']).ravel()
    print('confusion_matrix: [{} {} {} {}]'.format(tp, fp, fn, tn))
Esempio n. 11
0
#!/usr/bin/env python

import sys
import unittest
import getpass
from random import choice
from logger import TestLogger

sys.path.insert(0, '.')
sys.path.insert(0, '../')
sys.path.insert(0, '../../')

from lendingclub import LendingClub, Order
from lendingclub.filters import Filter

# Module-level client shared by every LiveTests case so session state
# persists across the tests in this file.
logger = TestLogger()
lc = LendingClub(logger=logger)


class LiveTests(unittest.TestCase):

    def setUp(self):
        """Guarantee no live order can actually be placed during a test."""
        # Clear any existing orders
        lc.session.clear_session_order()

        # Monkey-patch the name-mangled private __place_order with a stub so
        # nothing is ever submitted to the real service.
        Order._Order__place_order = self.place_order_override

        # Sanity-check that the override is in effect before any test runs.
        probe = Order(lc)
        self.assertEqual(probe._Order__place_order('token'), 12345)
Esempio n. 12
0
    def __init__(self, burnin, enable_eth_tests, enable_modem_test=0, **kw):
        """Instantiate the hardware testers that apply to this board model.

        Args:
            burnin: burn-in flag, forwarded to TestConfig and TestLogger.
            enable_eth_tests: master switch gating the network tester.
            enable_modem_test: accepted for caller compatibility; not read
                in this constructor.
            **kw: per-tester boolean enable flags ('bootlog', 'ram',
                'ramstress', 'ethernet', 'serial', 'relayscounters',
                'dallas', 'i2c', 'usb') — all default True except
                'ramstress'.
        """
        self._burnin = burnin
        self._preflight_done = 0

        # DO NOT CHANGE THE ORDER OF THESE THREE ITEMS!  Thank you!
        self._config = TestConfig(burnin, '/etc/hw_test.conf')
        self._hw_info = HardwareInfo()
        self._logger = TestLogger(burnin, self._hw_info.serialno())

        self._bootlog_tester = None
        self._car_tester = None
        self._dallas_tester = None
        self._eth_tester = None
        self._i2c_tester = None
        self._mem_tester = None
        self._stressmemtester = None
        self._modem_tester = None
        self._serial_tester = None
        self._usb_tester = None
        self._enabled_tests = 0

        model = self._hw_info.model()
        if kw.get('bootlog', True):
            self._bootlog_tester = BootlogTester(self._config, self._hw_info,
                                                 self._logger)
            self._enabled_tests += 1
        if kw.get('ram', True):
            self._mem_tester = MemoryTester(self._config, self._hw_info,
                                            self._logger)
            self._enabled_tests += 1
        # do not perform stress test unless called for explicitly.
        if kw.get('ramstress', False):
            self._stressmemtester = MemoryTester(self._config, self._hw_info,
                                                 self._logger, True)
            self._enabled_tests += 1
        if enable_eth_tests and kw.get('ethernet', True):
            self._eth_tester = NetworkTester(self._config, self._hw_info,
                                             self._logger)
            self._enabled_tests += 1
        # IDIOM: `model not in ...` replaces the original `not model in ...`.
        if model not in ('TSWS', 'PC', 'Unknown', ''):
            self._avr_copro = avr()
            if kw.get('serial', True):
                self._serial_tester = SerialTester(self._config, self._hw_info,
                                                   self._logger)
                self._enabled_tests += 1
        if model in ('1200', '2400', '1500', '2500'):
            if kw.get('relayscounters', True):
                self._car_tester = CountersAndRelaysTester(
                    self._config, self._hw_info, self._logger, self._avr_copro)
                self._enabled_tests += 1
            if kw.get('dallas', True):
                self._dallas_tester = DallasTester(self._config, self._hw_info,
                                                   self._logger,
                                                   self._avr_copro)
                self._enabled_tests += 1
        if model in ('1500', '2500') and kw.get('i2c', True):
            self._i2c_tester = I2CTester(self._config, self._hw_info,
                                         self._logger)
            self._enabled_tests += 1
        # Reuse the cached `model` instead of re-querying hardware info
        # (the original called self._hw_info.model() a second time here).
        if model in ('2400', '2500') and kw.get('usb', True):
            self._usb_tester = USBTester(self._config, self._hw_info,
                                         self._logger)
            self._enabled_tests += 1
Esempio n. 13
0
 def setUp(self):
     """Give each test a fresh Session pointed at the local mock server."""
     logger = TestLogger()
     sess = session.Session(logger=logger)
     sess.base_url = 'http://127.0.0.1:8000/'
     self.logger = logger
     self.session = sess
def test(args):
    """Evaluate a CT model checkpoint and write study-level artifacts.

    Aggregates per-slice probabilities into per-study predictions, then writes
    pickled predictions, XGBoost features, slice indices, AUROC/AUPRC metrics,
    and PR/ROC curve images under ``args.results_dir``.

    Args:
        args: namespace providing ckpt_path, gpu_ids, device, phase,
            visualize_all and results_dir. ``start_epoch`` is written back
            from the checkpoint.
    """
    print("Stage 1")
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    print("Stage 2")
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    print("Stage 3")
    model.eval()
    print("Stage 4")
    data_loader = CTDataLoader(args, phase=args.phase, is_training=False)
    study2slices = defaultdict(list)
    study2probs = defaultdict(list)
    study2labels = {}
    logger = TestLogger(args, len(data_loader.dataset), data_loader.dataset.pixel_dict)

    # NOTE(review): `means` is accumulated but never consumed below.
    means = []

    # Get model outputs, log to TensorBoard, write masks to disk window-by-window
    util.print_err('Writing model outputs to {}...'.format(args.results_dir))
    with tqdm(total=len(data_loader.dataset), unit=' windows') as progress_bar:
        for i, (inputs, targets_dict) in enumerate(data_loader):
            # FIX: `.data[0]` on a 0-dim tensor is removed in modern PyTorch;
            # `.item()` is the supported scalar accessor.
            means.append(inputs.mean().item())
            with torch.no_grad():
                cls_logits = model.forward(inputs.to(args.device))
                # FIX: F.sigmoid is deprecated — torch.sigmoid is the
                # supported (and elsewhere-used) equivalent.
                cls_probs = torch.sigmoid(cls_logits)

            if args.visualize_all:
                logger.visualize(inputs, cls_logits, targets_dict=None, phase=args.phase, unique_id=i)

            max_probs = cls_probs.to('cpu').numpy()
            for study_num, slice_idx, prob in \
                    zip(targets_dict['study_num'], targets_dict['slice_idx'], list(max_probs)):
                # Convert to standard python data types
                study_num = int(study_num)
                slice_idx = int(slice_idx)

                # Save series num for aggregation
                study2slices[study_num].append(slice_idx)
                study2probs[study_num].append(prob.item())

                series = data_loader.get_series(study_num)
                if study_num not in study2labels:
                    study2labels[study_num] = int(series.is_positive)

            progress_bar.update(inputs.size(0))

    # Combine masks
    util.print_err('Combining masks...')
    max_probs = []
    labels = []
    predictions = {}
    print("Get max probability")
    for study_num in tqdm(study2slices):

        # Sort by slice index and get max probability
        slice_list, prob_list = (list(t) for t in zip(*sorted(zip(study2slices[study_num], study2probs[study_num]),
                                                              key=lambda slice_and_prob: slice_and_prob[0])))
        study2slices[study_num] = slice_list
        study2probs[study_num] = prob_list
        max_prob = max(prob_list)
        max_probs.append(max_prob)
        label = study2labels[study_num]
        labels.append(label)
        predictions[study_num] = {'label': label, 'pred': max_prob}

    # Save predictions to file, indexed by study number
    print("Save to pickle")
    with open('{}/preds.pickle'.format(args.results_dir), "wb") as fp:
        pickle.dump(predictions, fp)

    # Write features for XGBoost
    save_for_xgb(args.results_dir, study2probs, study2labels)
    # Write the slice indices used for the features
    print("Write slice indices")
    with open(os.path.join(args.results_dir, 'xgb', 'series2slices.json'), 'w') as json_fh:
        json.dump(study2slices, json_fh, sort_keys=True, indent=4)

    # Compute AUROC and AUPRC using max aggregation, write to files
    max_probs, labels = np.array(max_probs), np.array(labels)
    metrics = {
        args.phase + '_' + 'AUPRC': sk_metrics.average_precision_score(labels, max_probs),
        args.phase + '_' + 'AUROC': sk_metrics.roc_auc_score(labels, max_probs),
    }
    print("Write metrics")
    with open(os.path.join(args.results_dir, 'metrics.txt'), 'w') as metrics_fh:
        for k, v in metrics.items():
            metrics_fh.write('{}: {:.5f}\n'.format(k, v))

    curves = {
        args.phase + '_' + 'PRC': sk_metrics.precision_recall_curve(labels, max_probs),
        args.phase + '_' + 'ROC': sk_metrics.roc_curve(labels, max_probs)
    }
    for name, curve in curves.items():
        curve_np = util.get_plot(name, curve)
        curve_img = Image.fromarray(curve_np)
        curve_img.save(os.path.join(args.results_dir, '{}.png'.format(name)))
Esempio n. 15
0
def test(args):
    """Run the z-test inpainting loop for a generative model.

    Loads (or builds from pretrained weights) a generator, then iteratively
    optimizes the latent vector z_test — and, for 'perturbation' losses, the
    perturbation-net layers — so that the generated image matches the masked
    target. Masked/full/obscured losses are logged each iteration.
    """
    # Get loader for z-test
    loader = get_loader(args, phase='test')
    batch_size = args.batch_size
    class_vector = None

    # TODO: make into function that takes in args.model and returns the pretrained model
    #       and also consider whether it's class conditional and what kind of class conditional (how many classes) -> probably just imagenet now, actually maybe cifar-10 too
    #       and also consider add truncation sampling as option too - this should return model, z_test noise vec, and class_vec (optionally)
    if args.ckpt_path and not args.use_pretrained:
        model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    else:
        if 'BigGAN' in args.model:
            num_params = int(''.join(filter(str.isdigit, args.model)))

            if 'perturbation' in args.loss_fn:
                # Use custom BigGAN with Perturbation Net wrapper
                model = models.BigGANPerturbationNet.from_pretrained(
                    f'biggan-deep-{num_params}')
            else:
                # Use pretrained BigGAN from package
                model = BigGAN.from_pretrained(f'biggan-deep-{num_params}')

            z_test = truncated_noise_sample(truncation=args.truncation,
                                            batch_size=batch_size)
            z_test = torch.from_numpy(z_test)

            # Get class conditional label
            # 981 is baseball player
            # 207 is golden retriever
            # TODO: Conditional generation only
            class_vector = one_hot_from_int(207, batch_size=batch_size)
            class_vector = torch.from_numpy(class_vector)

        elif 'WGAN-GP' in args.model:
            generator_path = "/deep/group/gen-eval/model-training/src/GAN_models/improved-wgan-pytorch/experiments/exp4_wgan_gp/generator.pt"
            model = torch.load(generator_path)
            z_test = torch.randn(batch_size, 128)

        elif 'BEGAN' in args.model:
            generator_path = "/deep/group/gen-eval/model-training/src/GAN_models/BEGAN-pytorch/trained_models/64/models/gen_97000.pth"
            model = models.BEGANGenerator()
            model.load_state_dict(torch.load(generator_path))

            z_test = np.random.uniform(-1, 1, size=(batch_size, 64))
            z_test = torch.FloatTensor(z_test)

    # NOTE(review): if the checkpoint branch above is taken, z_test is never
    # initialized yet is used in the loop below (NameError); likewise `model`
    # is unbound if args.model matches none of the branches — confirm the
    # supported argument combinations.
    # Freeze model instead of using .eval()
    for param in model.parameters():
        param.requires_grad = False

    # If using perturbation net, learn perturbation layers
    if 'perturbation' in args.loss_fn:
        trainable_params = []
        for name, param in model.named_parameters():
            if 'perturb' in name:
                param.requires_grad = True
                trainable_params.append(param)
        print(f'Number of trainable params: {len(trainable_params)}')

    model = nn.DataParallel(model, args.gpu_ids)
    model = model.to(args.device)

    # Loss functions
    if 'mse' in args.loss_fn:
        pixel_criterion = torch.nn.MSELoss().to(args.device)
    else:
        pixel_criterion = torch.nn.L1Loss().to(args.device)

    if 'perceptual' in args.loss_fn:
        # Combination pixel-perceptual loss - Sec 3.2. By default, uses pixel L1.
        perceptual_criterion = torch.nn.L1Loss().to(args.device)
        perceptual_loss_weight = args.perceptual_loss_weight

        vgg_feature_extractor = models.VGGFeatureExtractor().to(args.device)
        vgg_feature_extractor.eval()
    elif 'perturbation' in args.loss_fn:
        # Perturbation network R. By default, uses pixel L1.
        # Sec 3.3: http://ganpaint.io/Bau_et_al_Semantic_Photo_Manipulation_preprint.pdf
        reg_loss_weight = args.reg_loss_weight

    # z_loss_fn = util.get_loss_fn(args.loss_fn, args)
    max_z_test_loss = 100.  # TODO: actually put max value possible here

    # Get logger, saver
    logger = TestLogger(args)
    # saver = ModelSaver(args) TODO: saver for perturbation network R

    print(f'Logs: {logger.log_dir}')
    print(f'Ckpts: {args.save_dir}')

    # Run z-test in batches
    logger.log_hparams(args)

    while not logger.is_finished_training():
        logger.start_epoch()

        for _, z_test_target, mask in loader:
            logger.start_iter()
            if torch.cuda.is_available():
                mask = mask.cuda()
                z_test = z_test.cuda()
                z_test_target = z_test_target.cuda()
                #class_vector = class_vector.cuda()

            # Target split into the masked (visible) and obscured regions.
            masked_z_test_target = z_test_target * mask
            obscured_z_test_target = z_test_target * (1.0 - mask)

            if 'perturbation' in args.loss_fn:
                # With backprop on only trainable parameters in perturbation net
                params = trainable_params + [z_test.requires_grad_()]
                z_optimizer = util.get_optimizer(params, args)
            else:
                # With backprop on only the input z, run one step of z-test and get z-loss
                z_optimizer = util.get_optimizer([z_test.requires_grad_()],
                                                 args)

            with torch.set_grad_enabled(True):

                if class_vector is not None:
                    z_probs = model.forward(z_test, class_vector,
                                            args.truncation).float()
                    # BigGAN outputs are in [-1, 1]; rescale to [0, 1].
                    z_probs = (z_probs + 1) / 2.
                else:
                    z_probs = model.forward(z_test).float()

                # Calculate the masked loss using z-test vector
                masked_z_probs = z_probs * mask
                z_loss = torch.zeros(1, requires_grad=True).to(args.device)

                pixel_loss = torch.zeros(1, requires_grad=True).to(args.device)
                pixel_loss = pixel_criterion(masked_z_probs,
                                             masked_z_test_target)

                if 'perceptual' in args.loss_fn:
                    z_probs_features = vgg_feature_extractor(masked_z_probs)
                    z_test_features = vgg_feature_extractor(
                        masked_z_test_target).detach()

                    perceptual_loss = torch.zeros(1, requires_grad=True).to(
                        args.device)
                    perceptual_loss = perceptual_criterion(
                        z_probs_features, z_test_features)

                    z_loss = pixel_loss + perceptual_loss_weight * perceptual_loss
                elif 'perturbation' in args.loss_fn:
                    # L2 penalty keeping perturbation weights near identity (1).
                    reg_loss = torch.zeros(1,
                                           requires_grad=True).to(args.device)
                    for name, param in model.named_parameters():
                        if 'perturb' in name:
                            delta = param - 1
                            reg_loss += torch.pow(delta, 2).mean()  #sum()
                    z_loss = pixel_loss + reg_loss_weight * reg_loss
                else:
                    z_loss = pixel_loss

                # Backprop on z-test vector
                z_loss.backward()
                z_optimizer.step()
                z_optimizer.zero_grad()

            # Compute the full loss (without mask) and obscured loss (loss only on masked region)
            # For logging and final evaluation (obscured loss is final MSE), so not in backprop loop
            full_z_loss = torch.zeros(1)
            full_pixel_loss = torch.zeros(1)
            full_pixel_loss = pixel_criterion(z_probs, z_test_target)  #.mean()

            obscured_z_probs = z_probs * (1.0 - mask)
            obscured_z_loss = torch.zeros(1)
            obscured_pixel_loss = torch.zeros(1)
            obscured_pixel_loss = pixel_criterion(
                obscured_z_probs, obscured_z_test_target)  #.mean()

            if 'perceptual' in args.loss_fn:
                # Full loss
                z_probs_full_features = vgg_feature_extractor(z_probs).detach()
                z_test_full_features = vgg_feature_extractor(
                    z_test_target).detach()

                full_perceptual_loss = torch.zeros(1)
                full_perceptual_loss = perceptual_criterion(
                    z_probs_full_features, z_test_full_features)

                full_z_loss = full_pixel_loss + perceptual_loss_weight * full_perceptual_loss

                # Obscured loss
                # NOTE(review): these feed the full (unmasked) tensors, same
                # as the "full" branch above — confirm the obscured features
                # were not meant to use obscured_z_probs/…_target.
                z_probs_obscured_features = vgg_feature_extractor(
                    z_probs).detach()
                z_test_obscured_features = vgg_feature_extractor(
                    z_test_target).detach()

                obscured_perceptual_loss = torch.zeros(1)
                obscured_perceptual_loss = perceptual_criterion(
                    z_probs_obscured_features, z_test_obscured_features)

                obscured_z_loss = obscured_pixel_loss + perceptual_loss_weight * obscured_perceptual_loss
            elif 'perturbation' in args.loss_fn:
                full_z_loss = full_pixel_loss + reg_loss_weight * reg_loss
                obscured_z_loss = obscured_pixel_loss + reg_loss_weight * reg_loss
            else:
                full_z_loss = full_pixel_loss
                obscured_z_loss = obscured_pixel_loss
            """# TODO: z_loss is not always MSE anymore - figure out desired metric
            if z_loss < max_z_test_loss:
                # Save MSE on obscured region # TODO: z_loss is not always MSE anymore - figure out desired metric
                final_metrics = {'z-loss': z_loss.item(), 'obscured-z-loss': obscured_z_loss.item()}
                logger._log_scalars(final_metrics)
                print('Recall (z loss - non obscured loss - if MSE)', z_loss) 
                print('Precision (MSE value on masked region)', obscured_z_loss)
            """

            # Log both train and eval model settings, and visualize their outputs
            logger.log_status(
                masked_probs=masked_z_probs,
                masked_loss=z_loss,
                masked_test_target=masked_z_test_target,
                full_probs=z_probs,
                full_loss=full_z_loss,
                full_test_target=z_test_target,
                obscured_probs=obscured_z_probs,
                obscured_loss=obscured_z_loss,
                obscured_test_target=obscured_z_test_target,
                save_preds=args.save_preds,
            )

            logger.end_iter()

        logger.end_epoch()

    # Last log after everything completes
    logger.log_status(
        masked_probs=masked_z_probs,
        masked_loss=z_loss,
        masked_test_target=masked_z_test_target,
        full_probs=z_probs,
        full_loss=full_z_loss,
        full_test_target=z_test_target,
        obscured_probs=obscured_z_probs,
        obscured_loss=obscured_z_loss,
        obscured_test_target=obscured_z_test_target,
        save_preds=args.save_preds,
        force_visualize=True,
    )
Esempio n. 16
0
# =COPYRIGHT=======================================================
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2017, 2018 All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
# =================================================================

import os
import requests
import lib.env as env
from logger import TestLogger
from retrying import retry

# Module-level logger shared by the auth helpers below.
logger = TestLogger(__name__)


def authenticate():
    """Log in and return [bearer_token, org_guid, space_guid]."""
    token = _get_bearer_token()
    org = _get_org_guid(token)
    space = _get_space_guid(token, org)
    return [token, org, space]


# retry: 5 times to get bearer_token, with 2sec exponential backoff (max 10-sec between tries)
@retry(stop_max_attempt_number=5,
       wait_exponential_multiplier=2000,
       wait_exponential_max=10000)
def _get_bearer_token():
Esempio n. 17
0
 def setUpClass(cls):
     """Open the XLSX fixture once, shared by every test in the class."""
     cls.logger = TestLogger("ParserXls")
     cls.parser = ParserXls(file_path=cls.root_dir + "test_file.xlsx")
     cls.file_fonte = cls.parser.open_file(sheet_name=None)
Esempio n. 18
0
 def setUp(self):
     """Give each test its own logger-backed Session."""
     test_logger = TestLogger()
     self.logger = test_logger
     self.session = session.Session(logger=test_logger)
Esempio n. 19
0
 def __init__(self):
     # _authenticate is expected to set bearer_token, org_guid and
     # space_guid on self — they are read immediately below.
     self._authenticate()
     self.tenant_id = tenant.get_tenant_id(self.bearer_token, self.org_guid, self.space_guid)
     # NOTE(review): the logger is created only after authentication, so
     # auth failures cannot be logged via self.logger — confirm intended.
     self.logger = TestLogger(__name__)