def test(args):

    model_fn = model_dict[args.model]
    model = model_fn(num_classes=1 if args.model == 'fd' else 10)
    model = nn.DataParallel(model, args.gpu_ids)

    ckpt_info = ModelSaver.load_model(args.ckpt_path, model)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    test_set = FilterDataset('alexnet', './filters', is_training=False)
    test_loader = torch.utils.data.DataLoader(test_set,
                                              batch_size=args.batch_size,
                                              shuffle=False,
                                              num_workers=args.num_workers)
    logger = TestLogger(args)

    logger.start_epoch()
    for inputs, labels in test_loader:
        logger.start_iter()

        with torch.set_grad_enabled(True):
            # Forward
            logits = model.forward(inputs.to(args.device))

        logger.end_iter(inputs, labels, logits)
    logger.end_epoch()
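
A hypothetical invocation of the test() entry point above, assuming an argparse-style namespace that carries the attributes the function reads (model, gpu_ids, ckpt_path, device, batch_size, num_workers); the checkpoint path and loader settings are placeholders, not values from the original project:

if __name__ == '__main__':
    import argparse

    # Placeholder values for illustration only.
    args = argparse.Namespace(model='fd', gpu_ids=[0], ckpt_path='ckpts/best.pth.tar',
                              device='cuda', batch_size=32, num_workers=4)
    test(args)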
Example #2
    def setUp(self):
        self.logger = TestLogger()

        self.lc = LendingClub(logger=self.logger)
        self.lc.session.base_url = 'http://127.0.0.1:8000/'
        self.lc.session.set_logger(None)
        self.lc.authenticate('*****@*****.**', 'supersecret')
    def setUp(self):
        self.logger = TestLogger()

        self.lc = LendingClub(logger=self.logger)
        self.lc.session.base_url = 'http://127.0.0.1:8000/'
        self.lc.session.set_logger(None)

        self.lc.authenticate('*****@*****.**', 'supersecret')

        # Make sure session is enabled and clear
        self.lc.session.post('/session/enabled')
        self.lc.session.request('delete', '/session')
def life_cycle_stack(stack_name, template, parameters, camVariables, delete_failed_deployment, delete=True, statsd=None):
    '''
    Deploy the template and wait until it finishes successfully.
    Destroy the stack after the deployment finished.
    '''
    iaas = IaaS()
    result = {}
    result['name'] = stack_name
    stack = None
    logger = TestLogger(__name__)
    delete_deployment = delete

    try:
        stack = iaas.deploy(stack_name, template, parameters, camVariables)
        iaas.waitForSuccess(stack, _WORKER_TIME_SEC)
    except AuthException as ex:
        logger.warning('Authentication Error, re-authenticating\n%s' % ex)
        stack = None
        raise ex
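
A hypothetical call to life_cycle_stack, based only on its signature and docstring; the template string and parameters dict below are placeholders shaped like the lookups the IaaS helper performs (parameters['variable'][name] with a 'default', 'value', or 'autogenerate' entry):

sample_template = '{"provider": {"ibm": {}}, "resource": {}}'   # placeholder JSON template
sample_parameters = {'variable': {'vm_name': {'default': 'lifecycle-test-vm'}}}

result = life_cycle_stack('lifecycle-test-stack', sample_template, sample_parameters,
                          camVariables=[], delete_failed_deployment=False, delete=True)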
Example #5
    def setUp(self):
        self.filters = Filter()
        self.filters['exclude_existing'] = False
        self.logger = TestLogger()

        self.lc = LendingClub(logger=self.logger)
        self.lc.session.base_url = 'http://127.0.0.1:8000/'
        self.lc.session.set_logger(None)
        self.lc.authenticate('*****@*****.**', 'supersecret')

        response = self.lc.session.get('/filter_validation', query={'id': 1})
        json_response = response.json()
        self.loan_list = json_response['loanFractions']
Example #6
def test(args):

    model_fn = models.__dict__[args.model]
    model = model_fn(args.num_classes)
    model = nn.DataParallel(model, args.gpu_ids)

    ckpt_info = ModelSaver.load_model(args.ckpt_path, model)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    _, test_loader, _ = get_cifar_loaders(args.batch_size, args.num_workers)
    logger = TestLogger(args)

    logger.start_epoch()
    for inputs, labels in test_loader:
        logger.start_iter()

        with torch.set_grad_enabled(True):
            # Forward
            logits = model.forward(inputs.to(args.device))

        logger.end_iter(inputs, labels, logits)
    logger.end_epoch()
Example #7
def test(args):

    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    data_loader = get_loader(args, phase=args.phase, is_training=False)
    logger = TestLogger(args, len(data_loader.dataset))

    # Get model outputs, log to TensorBoard, write masks to disk window-by-window
    util.print_err('Writing model outputs to {}...'.format(args.results_dir))

    all_gender = []
    all_age = []
    all_tte = []
    all_is_alive = []
    all_mu = []
    all_s2 = []
    with tqdm(total=len(data_loader.dataset), unit=' windows') as progress_bar:
        for i, (src, tgt) in enumerate(data_loader):
            all_gender.extend([int(x) for x in src[:, 0]])
            all_age.extend([float(x) for x in src[:, 1]])
            all_tte.extend([float(x) for x in tgt[:, 0]])
            all_is_alive.extend([int(x) for x in tgt[:, 1]])
            with torch.no_grad():
                pred_params = model.forward(src.to(args.device))

                # import pdb
                # pdb.set_trace()
                outputs = pred_params.cpu().numpy()
                all_mu.extend([float(x) for x in outputs[:, 0]])
                all_s2.extend([float(x) for x in outputs[:, 1]])

            progress_bar.update(src.size(0))

    # Write predicted parameters (mu, s2) and targets to a CSV file
    with open(args.results_dir + '/test_stats.csv', 'w') as fd:
        fd.write('gender, age, tte, is_alive, mu, s2\n')
        for gender, age, tte, is_alive, mu, s2 \
                in zip(all_gender, all_age, all_tte, all_is_alive, all_mu, all_s2):
            fd.write('%d, %f, %f, %d, %f, %f\n' %
                     (gender, age, tte, is_alive, mu, s2))
Example #8
    def setUp(self):
        self.logger = TestLogger()

        self.lc = LendingClub(logger=self.logger)
        self.lc.session.base_url = 'http://127.0.0.1:8000/'
        self.lc.session.set_logger(None)

        self.lc.authenticate('*****@*****.**', 'supersecret')

        # Make sure session is enabled and clear
        self.lc.session.post('/session/enabled')
        self.lc.session.request('delete', '/session')

        # Use version 3 of browseNotesAj.json
        self.lc.session.post('/session', data={'browseNotesAj': '3'})

        # Start order
        self.order = self.lc.start_order()
Example #9
def test(args):

    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    model.eval()

    # Run a single evaluation
    eval_loader = WhiteboardLoader(args.data_dir, args.phase, args.batch_size,
                                   shuffle=False, do_augment=False, num_workers=args.num_workers)
    logger = TestLogger(args, len(eval_loader.dataset))
    logger.start_epoch()
    evaluator = ModelEvaluator([eval_loader], logger, num_visuals=args.num_visuals, prob_threshold=args.prob_threshold)
    metrics = evaluator.evaluate(model, args.device, logger.epoch)
    logger.end_epoch(metrics)
Example #10
    def __init__(self, burnin, enable_eth_tests, enable_modem_test=0, **kw):
        self._burnin = burnin
        self._preflight_done = 0

        # DO NOT CHANGE THE ORDER OF THESE THREE ITEMS!  Thank you!
        self._config = TestConfig(burnin, '/etc/hw_test.conf')
        self._hw_info = HardwareInfo()
        self._logger = TestLogger(burnin, self._hw_info.serialno())

        self._bootlog_tester = None
        self._car_tester = None
        self._dallas_tester = None
        self._eth_tester = None
        self._i2c_tester = None
        self._mem_tester = None
        self._stressmemtester = None
        self._modem_tester = None
        self._serial_tester = None
        self._usb_tester = None
        self._enabled_tests = 0

        model = self._hw_info.model()
        if kw.get('bootlog', True):
            self._bootlog_tester = BootlogTester(
                self._config, self._hw_info, self._logger
                )
            self._enabled_tests += 1
        if kw.get('ram', True):
            self._mem_tester = MemoryTester(
                self._config, self._hw_info, self._logger
                )
            self._enabled_tests += 1
        # do not perform stress test unless called for explicitly.
        if kw.get('ramstress', False):
            self._stressmemtester = MemoryTester(
                self._config, self._hw_info, self._logger, True
                )
            self._enabled_tests += 1
        if enable_eth_tests and kw.get('ethernet', True):
            self._eth_tester = NetworkTester(
                self._config, self._hw_info, self._logger
            )
            self._enabled_tests += 1
        if model not in ('TSWS', 'PC', 'Unknown', ''):
            self._avr_copro = avr()
            if kw.get('serial', True):
                self._serial_tester = SerialTester(
                    self._config, self._hw_info, self._logger
                )
                self._enabled_tests += 1
        if model in ('1200', '2400', '1500', '2500'):
            if kw.get('relayscounters', True):
                self._car_tester = CountersAndRelaysTester(
                    self._config, self._hw_info, self._logger, self._avr_copro
                    )
                self._enabled_tests += 1
            if kw.get('dallas', True):
                self._dallas_tester = DallasTester(
                    self._config, self._hw_info, self._logger, self._avr_copro
                    )
                self._enabled_tests += 1
        if model in ('1500', '2500') and kw.get('i2c', True):
            self._i2c_tester = I2CTester(
                self._config, self._hw_info, self._logger
                )
            self._enabled_tests += 1
        if self._hw_info.model() in ('2400', '2500') and kw.get('usb', True):
            self._usb_tester = USBTester(self._config, self._hw_info, self._logger)
            self._enabled_tests += 1
        return
Example #11
class TesterFramework(object):
    def __init__(self, burnin, enable_eth_tests, enable_modem_test=0, **kw):
        self._burnin = burnin
        self._preflight_done = 0

        # DO NOT CHANGE THE ORDER OF THESE THREE ITEMS!  Thank you!
        self._config = TestConfig(burnin, '/etc/hw_test.conf')
        self._hw_info = HardwareInfo()
        self._logger = TestLogger(burnin, self._hw_info.serialno())

        self._bootlog_tester = None
        self._car_tester = None
        self._dallas_tester = None
        self._eth_tester = None
        self._i2c_tester = None
        self._mem_tester = None
        self._stressmemtester = None
        self._modem_tester = None
        self._serial_tester = None
        self._usb_tester = None
        self._enabled_tests = 0

        model = self._hw_info.model()
        if kw.get('bootlog', True):
            self._bootlog_tester = BootlogTester(
                self._config, self._hw_info, self._logger
                )
            self._enabled_tests += 1
        if kw.get('ram', True):
            self._mem_tester = MemoryTester(
                self._config, self._hw_info, self._logger
                )
            self._enabled_tests += 1
        # do not perform stress test unless called for explicitly.
        if kw.get('ramstress', False):
            self._stressmemtester = MemoryTester(
                self._config, self._hw_info, self._logger, True
                )
            self._enabled_tests += 1
        if enable_eth_tests and kw.get('ethernet', True):
            self._eth_tester = NetworkTester(
                self._config, self._hw_info, self._logger
            )
            self._enabled_tests += 1
        if model not in ('TSWS', 'PC', 'Unknown', ''):
            self._avr_copro = avr()
            if kw.get('serial', True):
                self._serial_tester = SerialTester(
                    self._config, self._hw_info, self._logger
                )
                self._enabled_tests += 1
        if model in ('1200', '2400', '1500', '2500'):
            if kw.get('relayscounters', True):
                self._car_tester = CountersAndRelaysTester(
                    self._config, self._hw_info, self._logger, self._avr_copro
                    )
                self._enabled_tests += 1
            if kw.get('dallas', True):
                self._dallas_tester = DallasTester(
                    self._config, self._hw_info, self._logger, self._avr_copro
                    )
                self._enabled_tests += 1
        if model in ('1500', '2500') and kw.get('i2c', True):
            self._i2c_tester = I2CTester(
                self._config, self._hw_info, self._logger
                )
            self._enabled_tests += 1
        if self._hw_info.model() in ('2400', '2500') and kw.get('usb', True):
            self._usb_tester = USBTester(self._config, self._hw_info, self._logger)
            self._enabled_tests += 1
        return

    ###
    # Do a single pass test over all tests except the boot log.
    #
    # XXX: iteration is unused for now, just a placeholder.
    def _core_test(self, iteration):
        if not self._preflight_done:
            raise Exception("Pre-test checkout has not been done, aborting.\n")

        if self._car_tester:
            self._car_tester.runtest(self._burnin)

        if self._dallas_tester:
            self._dallas_tester.runtest(self._burnin)

        if self._eth_tester:
            self._eth_tester.runtest(1) # burnin set to 1 to avoid netperf tests

        if self._i2c_tester:
            self._i2c_tester.runtest(self._burnin)

        if self._mem_tester:
            self._mem_tester.runtest(self._burnin)
            
        if self._stressmemtester:
            self._stressmemtester.runtest(self._burnin)

        if self._modem_tester:
            self._modem_tester.runtest(self._burnin)

        if self._serial_tester:
            self._serial_tester.runtest(self._burnin)

        if self._usb_tester:
            self._usb_tester.runtest(self._burnin)

        return


    ###
    # Make sure that we have a valid serial number, and:
    #   - If in burn-in mode we don't have one, do not test.
    #   - If in production mode, get this info using a barcode scanner
    #     and program the serial number and mac address(es).
    def preflight_check(self):
        if self._preflight_done:
            return 1

        serialRE = re.compile(r"\d{2}-\d{5}", re.IGNORECASE)

        result = serialRE.search(self._hw_info.serialno())
        if result:
            self._preflight_done = 1
            return 1

        message('This unit needs a serial number and MAC address(es).')
        if self._burnin:
            message('Burn-in tests cannot be done on brand-new units.')
            return 0

        msg_testing('One moment while a connection is made to the HW test server...')
        m_address = self._config.def_macaddr()
        self._hw_info.set_mac_addr(0, m_address)
        os.system('ifconfig eth0 down')
        #os.system('rmmod natsemi; sleep 1; insmod -q natsemi')
        if_mac_addr = '%s:%s:%s:%s:%s:%s' % \
            (m_address[0:2], m_address[2:4], m_address[4:6], m_address[6:8], 
             m_address[8:10], m_address[10:])

        os.system('ifconfig eth0 hw ether %s' % if_mac_addr)
        os.system('ifconfig eth0 %s up' % self._config.def_ipaddr())

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((self._config.hwtest_server(), self._config.hwtest_port()))
        except:
            msg_fail()
            return 0

        message('done.\nYou may now scan the barcode on this unit.')

        # Send a hello message and then go into a wait.
        s.send('GreetZ_1138')
        data = s.recv(64)
        s.close()

        serial, mac0, mac1 = data.split(":")
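        # Illustrative reply format (an assumption, not documented in this excerpt):
        #   "12-34567:0011223344AA:0011223344AB" -- serial number, eth0 MAC, eth1 MAC,
        #   where mac1 may be empty for single-interface models.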

        # Any of the following set_xxx calls will raise an exception on failure.
        msg_testing('Programming the serial number and MAC address(es) for this unit...')
        self._hw_info.set_serialno(serial)
        self._hw_info.set_mac_addr(0, mac0)
        if mac1 != "":
            self._hw_info.set_mac_addr(1, mac1)
        message('done.')

        msg_testing('Restarting networking...')
        self._hw_info.restart_networking()
        self._hw_info.reload_values()
        message('done.')

        self._preflight_done = 1
        return 1

    def print_prerun_summary(self):
        if self._enabled_tests <= 1:
            return
        self._logger.msg('Mediator hardware test program revision %s.\n', hwtest_version())
        self._logger.msg('Copyright (C) 2009 Cisco Systems, Inc.  All rights reserved.\n\n')
        self._logger.msg('Test executed on %s\n' % os.popen('date').readline())
        self._logger.msg('Model number: %s\n' % self._hw_info.model())
        self._logger.msg('Framework version: %s\n' % properties.COMPOUND_VERSION)
        self._logger.msg('MOE version: %s\n' % properties.MOE_VERSION)
        self._logger.msg('Serial number: %s\n' % self._hw_info.serialno())
        self._logger.msg('eth0 MAC address: %s\n' % self._hw_info.mac_addr(0))
        if self._hw_info.model() in ('TSWS', '2400', '2500'):
            self._logger.msg('eth1 MAC address: %s\n' % self._hw_info.mac_addr(1))

        ###
        # Having a list of loaded modules will be extremely helpful in the
        # event a test should fail (because the module might not have loaded).
        self._logger.msg('Loaded kernel modules:\n')
        result, spewage = execute_command('/sbin/lsmod')
        if not result:
            raise Exception('Cannot get module info, aborting test.')

        for line in spewage:
            self._logger.msg(line)

        self._logger.msg('\n')
        return

    def tests_in_error(self):
        errs = 0
        if self._bootlog_tester:
            errs += self._bootlog_tester.nissues()

        if self._car_tester:
            errs += self._car_tester.nissues()

        if self._dallas_tester:
            errs += self._dallas_tester.nissues()

        if self._eth_tester:
            errs += self._eth_tester.nissues()

        if self._i2c_tester:
            errs += self._i2c_tester.nissues()

        if self._mem_tester:
            errs += self._mem_tester.nissues()

        if self._modem_tester:
            errs += self._modem_tester.nissues()
            
        if self._serial_tester:
            errs += self._serial_tester.nissues()

        if self._usb_tester:
            errs += self._usb_tester.nissues()
        return errs
    
    def report_results(self):
        if self._enabled_tests > 1:
            self._logger.msg('Test summary for unit %s:\n' % self._hw_info.serialno())

        # NOTE NOTE NOTE NOTE NOTE NOTE =---------------------------------
        #
        # Please report results in the order that the tests are done.
        # Like muscle memory, folks can develop visual memory as well.
        #
        # NOTE NOTE NOTE NOTE NOTE NOTE =---------------------------------

        if self._bootlog_tester:
            self._bootlog_tester.print_results()

        if self._car_tester:
            self._car_tester.print_results()

        if self._dallas_tester:
            self._dallas_tester.print_results()

        if self._eth_tester:
            self._eth_tester.print_results()

        if self._i2c_tester:
            self._i2c_tester.print_results()

        if self._mem_tester:
            self._mem_tester.print_results()

        if self._modem_tester:
            self._modem_tester.print_results()

        if self._serial_tester:
            self._serial_tester.print_results()

        if self._usb_tester:
            self._usb_tester.print_results()
            
        if self._enabled_tests > 1:
            self._logger.msg("=----- END OF REPORT ------------------------------=\n\n")

    ###
    # Examine the boot log and run all core tests once.
    def runtests(self):
        if not self._preflight_done:
            raise Exception("Pre-test checkout has not been done, aborting.\n")

        if self._bootlog_tester:
            self._bootlog_tester.runtest(self._burnin)

        self._core_test(1)
        return
Example #12
#!/usr/bin/env python

import sys
import unittest
import getpass
from random import choice
from logger import TestLogger

sys.path.insert(0, '.')
sys.path.insert(0, '../')
sys.path.insert(0, '../../')

from lendingclub import LendingClub, Order
from lendingclub.filters import Filter

logger = TestLogger()
lc = LendingClub(logger=logger)


class LiveTests(unittest.TestCase):

    def setUp(self):
        # Clear any existing orders
        lc.session.clear_session_order()

        # Override Order.__place_order so that no orders can be made
        Order._Order__place_order = self.place_order_override

        # Make sure that the override worked
        o = Order(lc)
        self.assertEqual(o._Order__place_order('token'), 12345)
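
    # Hypothetical stand-in (not shown in this excerpt): place_order_override must
    # return the fixed order id that the assertion above checks for, so a minimal
    # version simply ignores the order token and returns that id.
    def place_order_override(self, *args, **kwargs):
        return 12345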
    def __init__(self, burnin, enable_eth_tests, enable_modem_test=0, **kw):
        self._burnin = burnin
        self._preflight_done = 0

        # DO NOT CHANGE THE ORDER OF THESE THREE ITEMS!  Thank you!
        self._config = TestConfig(burnin, '/etc/hw_test.conf')
        self._hw_info = HardwareInfo()
        self._logger = TestLogger(burnin, self._hw_info.serialno())

        self._bootlog_tester = None
        self._car_tester = None
        self._dallas_tester = None
        self._eth_tester = None
        self._i2c_tester = None
        self._mem_tester = None
        self._stressmemtester = None
        self._modem_tester = None
        self._serial_tester = None
        self._usb_tester = None
        self._enabled_tests = 0

        model = self._hw_info.model()
        if kw.get('bootlog', True):
            self._bootlog_tester = BootlogTester(self._config, self._hw_info,
                                                 self._logger)
            self._enabled_tests += 1
        if kw.get('ram', True):
            self._mem_tester = MemoryTester(self._config, self._hw_info,
                                            self._logger)
            self._enabled_tests += 1
        # do not perform stress test unless called for explicitly.
        if kw.get('ramstress', False):
            self._stressmemtester = MemoryTester(self._config, self._hw_info,
                                                 self._logger, True)
            self._enabled_tests += 1
        if enable_eth_tests and kw.get('ethernet', True):
            self._eth_tester = NetworkTester(self._config, self._hw_info,
                                             self._logger)
            self._enabled_tests += 1
        if model not in ('TSWS', 'PC', 'Unknown', ''):
            self._avr_copro = avr()
            if kw.get('serial', True):
                self._serial_tester = SerialTester(self._config, self._hw_info,
                                                   self._logger)
                self._enabled_tests += 1
        if model in ('1200', '2400', '1500', '2500'):
            if kw.get('relayscounters', True):
                self._car_tester = CountersAndRelaysTester(
                    self._config, self._hw_info, self._logger, self._avr_copro)
                self._enabled_tests += 1
            if kw.get('dallas', True):
                self._dallas_tester = DallasTester(self._config, self._hw_info,
                                                   self._logger,
                                                   self._avr_copro)
                self._enabled_tests += 1
        if model in ('1500', '2500') and kw.get('i2c', True):
            self._i2c_tester = I2CTester(self._config, self._hw_info,
                                         self._logger)
            self._enabled_tests += 1
        if self._hw_info.model() in ('2400', '2500') and kw.get('usb', True):
            self._usb_tester = USBTester(self._config, self._hw_info,
                                         self._logger)
            self._enabled_tests += 1
        return
class TesterFramework(object):
    def __init__(self, burnin, enable_eth_tests, enable_modem_test=0, **kw):
        self._burnin = burnin
        self._preflight_done = 0

        # DO NOT CHANGE THE ORDER OF THESE THREE ITEMS!  Thank you!
        self._config = TestConfig(burnin, '/etc/hw_test.conf')
        self._hw_info = HardwareInfo()
        self._logger = TestLogger(burnin, self._hw_info.serialno())

        self._bootlog_tester = None
        self._car_tester = None
        self._dallas_tester = None
        self._eth_tester = None
        self._i2c_tester = None
        self._mem_tester = None
        self._stressmemtester = None
        self._modem_tester = None
        self._serial_tester = None
        self._usb_tester = None
        self._enabled_tests = 0

        model = self._hw_info.model()
        if kw.get('bootlog', True):
            self._bootlog_tester = BootlogTester(self._config, self._hw_info,
                                                 self._logger)
            self._enabled_tests += 1
        if kw.get('ram', True):
            self._mem_tester = MemoryTester(self._config, self._hw_info,
                                            self._logger)
            self._enabled_tests += 1
        # do not perform stress test unless called for explicitly.
        if kw.get('ramstress', False):
            self._stressmemtester = MemoryTester(self._config, self._hw_info,
                                                 self._logger, True)
            self._enabled_tests += 1
        if enable_eth_tests and kw.get('ethernet', True):
            self._eth_tester = NetworkTester(self._config, self._hw_info,
                                             self._logger)
            self._enabled_tests += 1
        if model not in ('TSWS', 'PC', 'Unknown', ''):
            self._avr_copro = avr()
            if kw.get('serial', True):
                self._serial_tester = SerialTester(self._config, self._hw_info,
                                                   self._logger)
                self._enabled_tests += 1
        if model in ('1200', '2400', '1500', '2500'):
            if kw.get('relayscounters', True):
                self._car_tester = CountersAndRelaysTester(
                    self._config, self._hw_info, self._logger, self._avr_copro)
                self._enabled_tests += 1
            if kw.get('dallas', True):
                self._dallas_tester = DallasTester(self._config, self._hw_info,
                                                   self._logger,
                                                   self._avr_copro)
                self._enabled_tests += 1
        if model in ('1500', '2500') and kw.get('i2c', True):
            self._i2c_tester = I2CTester(self._config, self._hw_info,
                                         self._logger)
            self._enabled_tests += 1
        if self._hw_info.model() in ('2400', '2500') and kw.get('usb', True):
            self._usb_tester = USBTester(self._config, self._hw_info,
                                         self._logger)
            self._enabled_tests += 1
        return

    ###
    # Do a single pass test over all tests except the boot log.
    #
    # XXX: iteration is unused for now, just a placeholder.
    def _core_test(self, iteration):
        if not self._preflight_done:
            raise Exception("Pre-test checkout has not been done, aborting.\n")

        if self._car_tester:
            self._car_tester.runtest(self._burnin)

        if self._dallas_tester:
            self._dallas_tester.runtest(self._burnin)

        if self._eth_tester:
            self._eth_tester.runtest(
                1)  # burnin set to 1 to avoid netperf tests

        if self._i2c_tester:
            self._i2c_tester.runtest(self._burnin)

        if self._mem_tester:
            self._mem_tester.runtest(self._burnin)

        if self._stressmemtester:
            self._stressmemtester.runtest(self._burnin)

        if self._modem_tester:
            self._modem_tester.runtest(self._burnin)

        if self._serial_tester:
            self._serial_tester.runtest(self._burnin)

        if self._usb_tester:
            self._usb_tester.runtest(self._burnin)

        return

    ###
    # Make sure that we have a valid serial number, and:
    #   - If in burn-in mode we don't have one, do not test.
    #   - If in production mode, get this info using a barcode scanner
    #     and program the serial number and mac address(es).
    def preflight_check(self):
        if self._preflight_done:
            return 1

        serialRE = re.compile(r"\d{2}-\d{5}", re.IGNORECASE)

        result = serialRE.search(self._hw_info.serialno())
        if result:
            self._preflight_done = 1
            return 1

        message('This unit needs a serial number and MAC address(es).')
        if self._burnin:
            message('Burn-in tests cannot be done on brand-new units.')
            return 0

        msg_testing(
            'One moment while a connection is made to the HW test server...')
        m_address = self._config.def_macaddr()
        self._hw_info.set_mac_addr(0, m_address)
        os.system('ifconfig eth0 down')
        #os.system('rmmod natsemi; sleep 1; insmod -q natsemi')
        if_mac_addr = '%s:%s:%s:%s:%s:%s' % \
            (m_address[0:2], m_address[2:4], m_address[4:6], m_address[6:8],
             m_address[8:10], m_address[10:])

        os.system('ifconfig eth0 hw ether %s' % if_mac_addr)
        os.system('ifconfig eth0 %s up' % self._config.def_ipaddr())

        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(
                (self._config.hwtest_server(), self._config.hwtest_port()))
        except:
            msg_fail()
            return 0

        message('done.\nYou may now scan the barcode on this unit.')

        # Send a hello message and then go into a wait.
        s.send('GreetZ_1138')
        data = s.recv(64)
        s.close()

        serial, mac0, mac1 = data.split(":")

        # Any of the following set_xxx calls will raise an exception on failure.
        msg_testing(
            'Programming the serial number and MAC address(es) for this unit...'
        )
        self._hw_info.set_serialno(serial)
        self._hw_info.set_mac_addr(0, mac0)
        if mac1 != "":
            self._hw_info.set_mac_addr(1, mac1)
        message('done.')

        msg_testing('Restarting networking...')
        self._hw_info.restart_networking()
        self._hw_info.reload_values()
        message('done.')

        self._preflight_done = 1
        return 1

    def print_prerun_summary(self):
        if self._enabled_tests <= 1:
            return
        self._logger.msg('Mediator hardware test program revision %s.\n',
                         hwtest_version())
        self._logger.msg(
            'Copyright (C) 2009 Cisco Systems, Inc.  All rights reserved.\n\n')
        self._logger.msg('Test executed on %s\n' % os.popen('date').readline())
        self._logger.msg('Model number: %s\n' % self._hw_info.model())
        self._logger.msg('Framework version: %s\n' %
                         properties.COMPOUND_VERSION)
        self._logger.msg('MOE version: %s\n' % properties.MOE_VERSION)
        self._logger.msg('Serial number: %s\n' % self._hw_info.serialno())
        self._logger.msg('eth0 MAC address: %s\n' % self._hw_info.mac_addr(0))
        if self._hw_info.model() in ('TSWS', '2400', '2500'):
            self._logger.msg('eth1 MAC address: %s\n' %
                             self._hw_info.mac_addr(1))

        ###
        # Having a list of loaded modules will be extremely helpful in the
        # event a test should fail (because the module might not have loaded).
        self._logger.msg('Loaded kernel modules:\n')
        result, spewage = execute_command('/sbin/lsmod')
        if not result:
            raise Exception('Cannot get module info, aborting test.')

        for line in spewage:
            self._logger.msg(line)

        self._logger.msg('\n')
        return

    def tests_in_error(self):
        errs = 0
        if self._bootlog_tester:
            errs += self._bootlog_tester.nissues()

        if self._car_tester:
            errs += self._car_tester.nissues()

        if self._dallas_tester:
            errs += self._dallas_tester.nissues()

        if self._eth_tester:
            errs += self._eth_tester.nissues()

        if self._i2c_tester:
            errs += self._i2c_tester.nissues()

        if self._mem_tester:
            errs += self._mem_tester.nissues()

        if self._modem_tester:
            errs += self._modem_tester.nissues()

        if self._serial_tester:
            errs += self._serial_tester.nissues()

        if self._usb_tester:
            errs += self._usb_tester.nissues()
        return errs

    def report_results(self):
        if self._enabled_tests > 1:
            self._logger.msg('Test summary for unit %s:\n' %
                             self._hw_info.serialno())

        # NOTE NOTE NOTE NOTE NOTE NOTE =---------------------------------
        #
        # Please report results in the order that the tests are done.
        # Like muscle memory, folks can develop visual memory as well.
        #
        # NOTE NOTE NOTE NOTE NOTE NOTE =---------------------------------

        if self._bootlog_tester:
            self._bootlog_tester.print_results()

        if self._car_tester:
            self._car_tester.print_results()

        if self._dallas_tester:
            self._dallas_tester.print_results()

        if self._eth_tester:
            self._eth_tester.print_results()

        if self._i2c_tester:
            self._i2c_tester.print_results()

        if self._mem_tester:
            self._mem_tester.print_results()

        if self._modem_tester:
            self._modem_tester.print_results()

        if self._serial_tester:
            self._serial_tester.print_results()

        if self._usb_tester:
            self._usb_tester.print_results()

        if self._enabled_tests > 1:
            self._logger.msg(
                "=----- END OF REPORT ------------------------------=\n\n")

    ###
    # Examine the boot log and run all core tests once.
    def runtests(self):
        if not self._preflight_done:
            raise Exception("Pre-test checkout has not been done, aborting.\n")

        if self._bootlog_tester:
            self._bootlog_tester.runtest(self._burnin)

        self._core_test(1)
        return
Example #15
    def setUpClass(cls):
        cls.logger = TestLogger("ParserXls")
        file_path = cls.root_dir + "test_file.xlsx"
        cls.parser = ParserXls(file_path=file_path)
        cls.file_fonte = cls.parser.open_file(sheet_name=None)
    def __init__(self):
        self._authenticate()
        self.tenant_id = tenant.get_tenant_id(self.bearer_token, self.org_guid, self.space_guid)
        self.logger = TestLogger(__name__)
def test(args):
    print ("Stage 1")
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    print ("Stage 2")
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    print ("Stage 3")
    model.eval()
    print ("Stage 4")
    data_loader = CTDataLoader(args, phase=args.phase, is_training=False)
    study2slices = defaultdict(list)
    study2probs = defaultdict(list)
    study2labels = {}
    logger = TestLogger(args, len(data_loader.dataset), data_loader.dataset.pixel_dict)

    means = []

    # Get model outputs, log to TensorBoard, write masks to disk window-by-window
    util.print_err('Writing model outputs to {}...'.format(args.results_dir))
    with tqdm(total=len(data_loader.dataset), unit=' windows') as progress_bar:
        for i, (inputs, targets_dict) in enumerate(data_loader):
            means.append(inputs.mean().item())
            with torch.no_grad():
                cls_logits = model.forward(inputs.to(args.device))
                cls_probs = torch.sigmoid(cls_logits)

            if args.visualize_all:
                logger.visualize(inputs, cls_logits, targets_dict=None, phase=args.phase, unique_id=i)

            max_probs = cls_probs.to('cpu').numpy()
            for study_num, slice_idx, prob in \
                    zip(targets_dict['study_num'], targets_dict['slice_idx'], list(max_probs)):
                # Convert to standard python data types
                study_num = int(study_num)
                slice_idx = int(slice_idx)

                # Save series num for aggregation
                study2slices[study_num].append(slice_idx)
                study2probs[study_num].append(prob.item())

                series = data_loader.get_series(study_num)
                if study_num not in study2labels:
                    study2labels[study_num] = int(series.is_positive)

            progress_bar.update(inputs.size(0))
    
    # Combine masks
    util.print_err('Combining masks...')
    max_probs = []
    labels = []
    predictions = {}
    print("Get max probability")
    for study_num in tqdm(study2slices):

        # Sort by slice index and get max probability
        slice_list, prob_list = (list(t) for t in zip(*sorted(zip(study2slices[study_num], study2probs[study_num]),
                                                              key=lambda slice_and_prob: slice_and_prob[0])))
        study2slices[study_num] = slice_list
        study2probs[study_num] = prob_list
        max_prob = max(prob_list)
        max_probs.append(max_prob)
        label = study2labels[study_num]
        labels.append(label)
        predictions[study_num] = {'label':label, 'pred':max_prob}

    #Save predictions to file, indexed by study number
    print("Save to pickle")
    with open('{}/preds.pickle'.format(args.results_dir),"wb") as fp:
        pickle.dump(predictions,fp)
        
    # Write features for XGBoost
    save_for_xgb(args.results_dir, study2probs, study2labels)
    # Write the slice indices used for the features
    print("Write slice indices")
    with open(os.path.join(args.results_dir, 'xgb', 'series2slices.json'), 'w') as json_fh:
        json.dump(study2slices, json_fh, sort_keys=True, indent=4)

    # Compute AUROC and AUPRC using max aggregation, write to files
    max_probs, labels = np.array(max_probs), np.array(labels)
    metrics = {
        args.phase + '_' + 'AUPRC': sk_metrics.average_precision_score(labels, max_probs),
        args.phase + '_' + 'AUROC': sk_metrics.roc_auc_score(labels, max_probs),
    }
    print("Write metrics")
    with open(os.path.join(args.results_dir, 'metrics.txt'), 'w') as metrics_fh:
        for k, v in metrics.items():
            metrics_fh.write('{}: {:.5f}\n'.format(k, v))

    curves = {
        args.phase + '_' + 'PRC': sk_metrics.precision_recall_curve(labels, max_probs),
        args.phase + '_' + 'ROC': sk_metrics.roc_curve(labels, max_probs)
    }
    for name, curve in curves.items():
        curve_np = util.get_plot(name, curve)
        curve_img = Image.fromarray(curve_np)
        curve_img.save(os.path.join(args.results_dir, '{}.png'.format(name)))
Example #18
def test(args):
    # Get loader for z-test
    loader = get_loader(args, phase='test')
    batch_size = args.batch_size
    class_vector = None

    # TODO: make into function that takes in args.model and returns the pretrained model
    #       and also consider whether it's class conditional and what kind of class conditional (how many classes) -> probably just imagenet now, actually maybe cifar-10 too
    #       and also consider add truncation sampling as option too - this should return model, z_test noise vec, and class_vec (optionally)
    if args.ckpt_path and not args.use_pretrained:
        model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    else:
        if 'BigGAN' in args.model:
            num_params = int(''.join(filter(str.isdigit, args.model)))

            if 'perturbation' in args.loss_fn:
                # Use custom BigGAN with Perturbation Net wrapper
                model = models.BigGANPerturbationNet.from_pretrained(
                    f'biggan-deep-{num_params}')
            else:
                # Use pretrained BigGAN from package
                model = BigGAN.from_pretrained(f'biggan-deep-{num_params}')

            z_test = truncated_noise_sample(truncation=args.truncation,
                                            batch_size=batch_size)
            z_test = torch.from_numpy(z_test)

            # Get class conditional label
            # 981 is baseball player
            # 207 is golden retriever
            # TODO: Conditional generation only
            class_vector = one_hot_from_int(207, batch_size=batch_size)
            class_vector = torch.from_numpy(class_vector)

        elif 'WGAN-GP' in args.model:
            generator_path = "/deep/group/gen-eval/model-training/src/GAN_models/improved-wgan-pytorch/experiments/exp4_wgan_gp/generator.pt"
            model = torch.load(generator_path)
            z_test = torch.randn(batch_size, 128)

        elif 'BEGAN' in args.model:
            generator_path = "/deep/group/gen-eval/model-training/src/GAN_models/BEGAN-pytorch/trained_models/64/models/gen_97000.pth"
            model = models.BEGANGenerator()
            model.load_state_dict(torch.load(generator_path))

            z_test = np.random.uniform(-1, 1, size=(batch_size, 64))
            z_test = torch.FloatTensor(z_test)

    # Freeze model instead of using .eval()
    for param in model.parameters():
        param.requires_grad = False

    # If using perturbation net, learn perturbation layers
    if 'perturbation' in args.loss_fn:
        trainable_params = []
        for name, param in model.named_parameters():
            if 'perturb' in name:
                param.requires_grad = True
                trainable_params.append(param)
        print(f'Number of trainable params: {len(trainable_params)}')

    model = nn.DataParallel(model, args.gpu_ids)
    model = model.to(args.device)

    # Loss functions
    if 'mse' in args.loss_fn:
        pixel_criterion = torch.nn.MSELoss().to(args.device)
    else:
        pixel_criterion = torch.nn.L1Loss().to(args.device)

    if 'perceptual' in args.loss_fn:
        # Combination pixel-perceptual loss - Sec 3.2. By default, uses pixel L1.
        perceptual_criterion = torch.nn.L1Loss().to(args.device)
        perceptual_loss_weight = args.perceptual_loss_weight

        vgg_feature_extractor = models.VGGFeatureExtractor().to(args.device)
        vgg_feature_extractor.eval()
    elif 'perturbation' in args.loss_fn:
        # Perturbation network R. By default, uses pixel L1.
        # Sec 3.3: http://ganpaint.io/Bau_et_al_Semantic_Photo_Manipulation_preprint.pdf
        reg_loss_weight = args.reg_loss_weight

    # z_loss_fn = util.get_loss_fn(args.loss_fn, args)
    max_z_test_loss = 100.  # TODO: actually put max value possible here

    # Get logger, saver
    logger = TestLogger(args)
    # saver = ModelSaver(args) TODO: saver for perturbation network R

    print(f'Logs: {logger.log_dir}')
    print(f'Ckpts: {args.save_dir}')

    # Run z-test in batches
    logger.log_hparams(args)

    while not logger.is_finished_training():
        logger.start_epoch()

        for _, z_test_target, mask in loader:
            logger.start_iter()
            if torch.cuda.is_available():
                mask = mask.cuda()
                z_test = z_test.cuda()
                z_test_target = z_test_target.cuda()
                #class_vector = class_vector.cuda()

            masked_z_test_target = z_test_target * mask
            obscured_z_test_target = z_test_target * (1.0 - mask)

            if 'perturbation' in args.loss_fn:
                # With backprop on only trainable parameters in perturbation net
                params = trainable_params + [z_test.requires_grad_()]
                z_optimizer = util.get_optimizer(params, args)
            else:
                # With backprop on only the input z, run one step of z-test and get z-loss
                z_optimizer = util.get_optimizer([z_test.requires_grad_()],
                                                 args)

            with torch.set_grad_enabled(True):

                if class_vector is not None:
                    z_probs = model.forward(z_test, class_vector,
                                            args.truncation).float()
                    z_probs = (z_probs + 1) / 2.
                else:
                    z_probs = model.forward(z_test).float()

                # Calculate the masked loss using z-test vector
                masked_z_probs = z_probs * mask
                z_loss = torch.zeros(1, requires_grad=True).to(args.device)

                pixel_loss = torch.zeros(1, requires_grad=True).to(args.device)
                pixel_loss = pixel_criterion(masked_z_probs,
                                             masked_z_test_target)

                if 'perceptual' in args.loss_fn:
                    z_probs_features = vgg_feature_extractor(masked_z_probs)
                    z_test_features = vgg_feature_extractor(
                        masked_z_test_target).detach()

                    perceptual_loss = torch.zeros(1, requires_grad=True).to(
                        args.device)
                    perceptual_loss = perceptual_criterion(
                        z_probs_features, z_test_features)

                    z_loss = pixel_loss + perceptual_loss_weight * perceptual_loss
                elif 'perturbation' in args.loss_fn:
                    reg_loss = torch.zeros(1,
                                           requires_grad=True).to(args.device)
                    for name, param in model.named_parameters():
                        if 'perturb' in name:
                            delta = param - 1
                            reg_loss += torch.pow(delta, 2).mean()  #sum()
                    z_loss = pixel_loss + reg_loss_weight * reg_loss
                else:
                    z_loss = pixel_loss

                # Backprop on z-test vector
                z_loss.backward()
                z_optimizer.step()
                z_optimizer.zero_grad()

            # Compute the full loss (without mask) and obscured loss (loss only on masked region)
            # For logging and final evaluation (obscured loss is final MSE), so not in backprop loop
            full_z_loss = torch.zeros(1)
            full_pixel_loss = torch.zeros(1)
            full_pixel_loss = pixel_criterion(z_probs, z_test_target)  #.mean()

            obscured_z_probs = z_probs * (1.0 - mask)
            obscured_z_loss = torch.zeros(1)
            obscured_pixel_loss = torch.zeros(1)
            obscured_pixel_loss = pixel_criterion(
                obscured_z_probs, obscured_z_test_target)  #.mean()

            if 'perceptual' in args.loss_fn:
                # Full loss
                z_probs_full_features = vgg_feature_extractor(z_probs).detach()
                z_test_full_features = vgg_feature_extractor(
                    z_test_target).detach()

                full_perceptual_loss = torch.zeros(1)
                full_perceptual_loss = perceptual_criterion(
                    z_probs_full_features, z_test_full_features)

                full_z_loss = full_pixel_loss + perceptual_loss_weight * full_perceptual_loss

                # Obscured loss
                z_probs_obscured_features = vgg_feature_extractor(
                    z_probs).detach()
                z_test_obscured_features = vgg_feature_extractor(
                    z_test_target).detach()

                obscured_perceptual_loss = torch.zeros(1)
                obscured_perceptual_loss = perceptual_criterion(
                    z_probs_obscured_features, z_test_obscured_features)

                obscured_z_loss = obscured_pixel_loss + perceptual_loss_weight * obscured_perceptual_loss
            elif 'perturbation' in args.loss_fn:
                full_z_loss = full_pixel_loss + reg_loss_weight * reg_loss
                obscured_z_loss = obscured_pixel_loss + reg_loss_weight * reg_loss
            else:
                full_z_loss = full_pixel_loss
                obscured_z_loss = obscured_pixel_loss
            """# TODO: z_loss is not always MSE anymore - figure out desired metric
            if z_loss < max_z_test_loss:
                # Save MSE on obscured region # TODO: z_loss is not always MSE anymore - figure out desired metric
                final_metrics = {'z-loss': z_loss.item(), 'obscured-z-loss': obscured_z_loss.item()}
                logger._log_scalars(final_metrics)
                print('Recall (z loss - non obscured loss - if MSE)', z_loss) 
                print('Precision (MSE value on masked region)', obscured_z_loss)
            """

            # Log both train and eval model settings, and visualize their outputs
            logger.log_status(
                masked_probs=masked_z_probs,
                masked_loss=z_loss,
                masked_test_target=masked_z_test_target,
                full_probs=z_probs,
                full_loss=full_z_loss,
                full_test_target=z_test_target,
                obscured_probs=obscured_z_probs,
                obscured_loss=obscured_z_loss,
                obscured_test_target=obscured_z_test_target,
                save_preds=args.save_preds,
            )

            logger.end_iter()

        logger.end_epoch()

    # Last log after everything completes
    logger.log_status(
        masked_probs=masked_z_probs,
        masked_loss=z_loss,
        masked_test_target=masked_z_test_target,
        full_probs=z_probs,
        full_loss=full_z_loss,
        full_test_target=z_test_target,
        obscured_probs=obscured_z_probs,
        obscured_loss=obscured_z_loss,
        obscured_test_target=obscured_z_test_target,
        save_preds=args.save_preds,
        force_visualize=True,
    )
Example #19
    def setUpClass(cls):
        cls.test_logger = TestLogger.setup_custom_logger("test_log")
Example #20
# =COPYRIGHT=======================================================
# Licensed Materials - Property of IBM
#
# (c) Copyright IBM Corp. 2017, 2018 All Rights Reserved
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
# =================================================================

import os
import requests
import lib.env as env
from logger import TestLogger
from retrying import retry

logger = TestLogger(__name__)


def authenticate():
    bearer_token = _get_bearer_token()
    org_guid = _get_org_guid(bearer_token)
    space_guid = _get_space_guid(bearer_token, org_guid)

    return [bearer_token, org_guid, space_guid]


# retry: 5 times to get bearer_token, with 2sec exponential backoff (max 10-sec between tries)
@retry(stop_max_attempt_number=5,
       wait_exponential_multiplier=2000,
       wait_exponential_max=10000)
def _get_bearer_token():
def test(args):
    print ("Stage 1")
    model, ckpt_info = ModelSaver.load_model(args.ckpt_path, args.gpu_ids)
    print ("Stage 2")
    args.start_epoch = ckpt_info['epoch'] + 1
    model = model.to(args.device)
    print ("Stage 3")
    model.eval()
    print ('This should be false: {}'.format(model.training))
    print ("Stage 4")
    data_loader = CTDataLoader(args, phase=args.phase, is_training=False)
    #print('data_loader={}'.format(data_loader))
    #print('data_loader.dataset={}'.format(data_loader.dataset))
    study2slices = defaultdict(list)
    study2probs = defaultdict(list)
    study2labels = {}
    logger = TestLogger(args, len(data_loader.dataset), data_loader.dataset.pixel_dict)
    print("Stage 5")
    f = open('/projectnb/ece601/kaggle-pulmonary-embolism/meganmp/train/series_list.pkl','rb')
    data_labels = pickle.load(f)

    # Create list to manually process labels
    #with open('positive.txt') as f:
         #pos_labels = f.readlines()
    #pos_labels = [x.strip() for x in pos_labels]
    ispos = [x.is_positive for x in data_labels]
    isposidx = [x.study_num for x in data_labels]
    label_dict = {}
    for i in range(len(ispos)):
        label_dict[isposidx[i]] = ispos[i]

    for key in label_dict.keys():
        print('label_dict={}\t{}'.format(key, label_dict[key]))
    

    # Get model outputs, log to TensorBoard, write masks to disk window-by-window
    util.print_err('Writing model outputs to {}...'.format(args.results_dir))
    with tqdm(total=len(data_loader.dataset), unit=' windows') as progress_bar:
        for i, (inputs, targets_dict) in enumerate(data_loader):
            with torch.no_grad():
                cls_logits = model.forward(inputs.to(args.device))
                cls_probs = torch.sigmoid(cls_logits)

            if args.visualize_all:
                logger.visualize(inputs, cls_logits, targets_dict=None, phase=args.phase, unique_id=i)

            max_probs = cls_probs.to('cpu').numpy()
            for study_num, slice_idx, prob in \
                    zip(targets_dict['study_num'], targets_dict['slice_idx'], list(max_probs)):
                #print('targets_dict[studynum]={}'.format(targets_dict['study_num']))
                #print('targets_dict[sliceidx]={}'.format(targets_dict['slice_idx']))
                # Convert to standard python data types
                study_num = study_num #.item()
                #study_num = int(study_num)
                slice_idx = int(slice_idx)

                # Save series num for aggregation
                study2slices[study_num].append(slice_idx)
                study2probs[study_num].append(prob.item())


                series = data_loader.get_series(study_num)
                if study_num not in study2labels:
                    print('study_num={}'.format(study_num))
                    print('series.is_positive={}'.format(label_dict[study_num]))
                    study2labels[study_num] = label_dict[study_num]
                    #if study_num in pos_labels:
                        #print('DEBUG -------=1?-------------------')
                        #print('POS LABEL')
                        #print('study_num={}'.format(study_num))
                        #study2labels[study_num] = 1
                    #else:
                        #print('Not in study2labels. series = {}'.format(study_num))
                        #print('series.is_positive={}'.format(series.is_positive))
                        #study2labels[study_num] = int(series.is_positive)
                        #print('study2labels: {}'.format(study2labels[study_num]))

            progress_bar.update(inputs.size(0))

    print('study2labels={}'.format(study2labels))

    # Combine masks
    util.print_err('Combining masks...')
    max_probs = []
    labels = []
    predictions = {}
    print("Get max prob")
    for study_num in tqdm(study2slices):

        # Sort by slice index and get max probability
        slice_list, prob_list = (list(t) for t in zip(*sorted(zip(study2slices[study_num], study2probs[study_num]),
                                                              key=lambda slice_and_prob: slice_and_prob[0])))
        study2slices[study_num] = slice_list
        study2probs[study_num] = prob_list
        max_prob = max(prob_list)
        print('study={}\tmax_prob={}'.format(study_num, max_prob))
        max_probs.append(max_prob)
        label = study2labels[study_num]
        labels.append(label)
        predictions[study_num] = {'label':label, 'pred':max_prob}

    #Save predictions to file, indexed by study number
    print("Saving predictions to pickle files")
    with open('{}/preds.pickle'.format(args.results_dir),"wb") as fp:
        pickle.dump(predictions,fp)

    results_series = [k for k,_ in predictions.items()]
    results_pred = [v['pred'] for _,v in predictions.items()]
    results_label = [v['label'] for _,v in predictions.items()]
    print('roc_auc_score={}'.format(roc_auc_score(results_label, results_pred)))

    # Create dataframe summary
    TRAIN_CSV = '/projectnb/ece601/kaggle-pulmonary-embolism/rsna-str-pulmonary-embolism-detection/train.csv'
    train_df = pd.read_csv(TRAIN_CSV)
    train_df = train_df[['SeriesInstanceUID', 'negative_exam_for_pe']]
    train_df = train_df.groupby('SeriesInstanceUID').aggregate(list)
    train_df['pe_label'] = train_df['negative_exam_for_pe'].apply(lambda x: 0 if 1 in x else 1)

    results_dict = {
        'series': results_series,
        'pred': results_pred
    }
    results_df = pd.DataFrame.from_dict(results_dict)

    results_df = results_df.set_index('series')
    results_df = results_df.join(train_df, how='left').reset_index().rename({'index': 'series'})
    print('roc_auc_score={}'.format(roc_auc_score(results_df['pe_label'], results_df['pred'])))
    
    # Calculate confusion matrix
    results_df['interpretation'] = results_df['pred'].apply(lambda x: 0 if x < 0.5 else 1)
    print(results_df.head(10))
    tn, fp, fn, tp = confusion_matrix(results_df['pe_label'], results_df['interpretation']).ravel()
    print('confusion matrix: tn={} fp={} fn={} tp={}'.format(tn, fp, fn, tp))
    def setUp(self):
        self.logger = TestLogger()
        self.session = session.Session(logger=self.logger)
Exemple #23
0
    def setUp(self):
        self.logger = TestLogger()
        self.session = session.Session(logger=self.logger)
        self.session.base_url = 'http://127.0.0.1:8000/'
class IaaS(object):
    '''
    IaaS APIs
    '''

    def __init__(self):
        self._authenticate()
        self.tenant_id = tenant.get_tenant_id(self.bearer_token, self.org_guid, self.space_guid)
        self.logger = TestLogger(__name__)

    def _authenticate(self):
        auth_response = auth.authenticate()
        self.bearer_token = auth_response[0]
        self.org_guid = auth_response[1]
        self.space_guid = auth_response[2]

    def _get_request_params(self):
        return {'ace_orgGuid': self.org_guid, 'cloudOE_spaceGuid': self.space_guid, 'tenantId': self.tenant_id}

    def _get_request_header(self):
        return {'Authorization': self.bearer_token['token_type'] + ' ' + self.bearer_token['access_token'],
                'Accept': 'application/json'}


    def _get_variable_value(self, parameters, param):
        try:
            # prefer 'autogenerate', then 'default', then 'value'
            value = None
            if 'autogenerate' in parameters['variable'][param]:
                value = _generate_value(parameters['variable'][param]['autogenerate'])
            elif 'default' in parameters['variable'][param]:
                value = parameters['variable'][param]['default']
            else:
                value = parameters['variable'][param]['value']
            return value
        except KeyError:
            self.logger.error('Unable to find a value for variable: %s' % param)
            raise # re-raise
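
    # Shape of 'parameters' assumed by the lookup above (inferred from this class):
    #   {'variable': {'<var_name>': {'autogenerate': ..., 'default': ..., 'value': ...}}}
    # 'autogenerate' takes precedence, then 'default', then 'value'; anything else raises KeyError.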


    def _build_request_parameters_old(self, parameters):
        # old-style parameters (no camvariables.json)
        request_parameters = {}
        for param in parameters['variable']:
            request_parameters[param] = self._get_variable_value(parameters, param)
        return request_parameters


    def _build_request_parameters_camVariables(self, parameters, camVariables):
        for varMap in camVariables:
            if 'default' not in varMap and 'value' not in varMap:
                # need to set a value for this camVariable entry
                if 'name' not in varMap:
                    raise ValueError("Missing 'name' in: %s" % varMap)
                varMap['value'] = self._get_variable_value(parameters, varMap['name'])
        return camVariables
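
    # camVariables (from camvariables.json) is expected to be a list of dicts,
    # e.g. [{'name': 'instance_name', 'default': 'vm1'}, {'name': 'password'}]
    # (illustrative names); entries lacking 'default'/'value' are filled from 'parameters'.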


    def _build_request_parameters(self, parameters, camVariables):
        request_parameters = None
        # request_parameters have two different formats, based on whether we
        # are using camvariables.json or not
        if not camVariables:
            request_parameters = self._build_request_parameters_old(parameters)
        else:
            request_parameters = self._build_request_parameters_camVariables(parameters, camVariables)
        return request_parameters


    def deploy(self, stack_name, template, parameters, camVariables, retry=True):
        '''
        Deploys the template
        '''

        template_format = "JSON" if template.strip().startswith("{") else "HCL"
        # parse the template and find the provider
        template_parsed = hcl.loads(template) # parse the JSON/HCL template into dict
        template_provider = list(template_parsed['provider'].keys())[0]

        self.logger.info('Deploying %s...' % stack_name)
        # find an appropriate cloud connection id
        cloud_connection = self.get_cloud_connection(template_provider)

        request_data = {
            "name": stack_name,
            "template_type": "Terraform",
            "template_format": template_format,
            "cloud_connection_ids": [
                str(cloud_connection['id'])
            ],
            "template": template,
            "catalogName": stack_name,
            "catalogType": "starter",
            "tenantId": self.tenant_id,
            "parameters": self._build_request_parameters(parameters, camVariables)
        }

        request_header = self._get_request_header()
        request_header['Content-Type'] = 'application/json'
        _request_data = json.dumps(request_data)

        response = requests.post(env.IAAS_HOST + '/stacks',
                                 data=_request_data,
                                 params=self._get_request_params(),
                                 headers=request_header,
                                 timeout=60)
        if response.status_code == 401:
            self.handleAuthError(response, retry)
            return self.deploy(stack_name, template, parameters, camVariables, retry=False)
        if response.status_code != 200:
            raise Exception(
                "Failed to create stack %s, status code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s" % (
                    request_data['name'], response.status_code, response.headers, response.content, response.request.url,
                    response.request.headers, response.request.body))

        stack = response.json()
        stack_id = stack['id']
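        # Second request: POST /stacks/<id>/create starts the actual provisioning;
        # the first call above only registered the stack definition.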

        response = requests.post(env.IAAS_HOST + '/stacks/' + stack_id + '/create',
                                 data=json.dumps(stack),
                                 params=self._get_request_params(),
                                 headers=request_header,
                                 timeout=60)

        if response.status_code == 401:
            self.handleAuthError(response, retry)
            return self.deploy(stack_name, template, parameters, camVariables, retry=False)
        if response.status_code != 200:
            raise Exception(
                "Failed to deploy %s, status code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s" %
                (request_data['name'], response.status_code, response.headers, response.content, response.request.url,
                 response.request.headers, response.request.body))

        return response.json()

    def handleAuthError(self, response, retry):
        if retry:
            self.logger.warning(
                'Authentication error\nstatus code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s' % (
                    response.status_code, response.headers, response.content, response.request.url,
                    response.request.headers, response.request.body))
            self._authenticate()
        else:
            raise AuthException(
                'Authentication error\nstatus code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s' % (
                    response.status_code, response.headers, response.content, response.request.url,
                    response.request.headers, response.request.body))

    def get_cloud_connections(self):
        '''
        Retrieves all cloud connections
        '''
        request_header = self._get_request_header()
        return requests.get(env.IAAS_HOST + '/cloudconnections',
                            params=self._get_request_params(),
                            headers=request_header,
                            timeout=60)

    def delete(self, stack, retry=True):
        '''
        Delete the stack from IaaS
        '''
        self.logger.info('Deleting %s' % stack['name'])
        request_header = self._get_request_header()
        response = requests.delete(env.IAAS_HOST + '/stacks/' + stack['id'],
                                   params=self._get_request_params(),
                                   headers=request_header,
                                   timeout=60)
        if response.status_code == 401:
            self.handleAuthError(response, retry)
            return self.delete(stack, retry=False)
        if response.status_code > 300:
            raise Exception(
                "Failed to delete %s, status code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s" %
                (stack['name'], response.status_code, response.headers, response.content, response.request.url,
                 response.request.headers, response.request.body))

    def destroy(self, stack, retry=True):
        '''
        Destroy the stack from the infrastructure
        '''
        self.logger.info('Destroying %s' % stack['name'])
        request_header = self._get_request_header()
        response = requests.post(env.IAAS_HOST + '/stacks/' + stack['id'] + '/delete',
                                 data=json.dumps(stack),
                                 params=self._get_request_params(),
                                 headers=request_header,
                                 timeout=60)
        if response.status_code == 401:
            self.handleAuthError(response, retry)
            return self.destroy(stack, retry=False)
        if response.status_code > 300:
            raise Exception(
                "Failed to destroy %s, status code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s" %
                (stack['name'], response.status_code, response.headers, response.content, response.request.url,
                 response.request.headers, response.request.body))


    # retry: 10 times to get stack details, with 1sec exponential backoff (max 10-sec between tries)
    @retry(stop_max_attempt_number=10, wait_exponential_multiplier=1000, wait_exponential_max=10000)
    def retrieve(self, stack):
        '''
        Retrieves the stack details
        '''
        # self.logger.info('Retrieving the stack details')
        request_header = self._get_request_header()
        return requests.post(env.IAAS_HOST + '/stacks/' + stack['id'] + '/retrieve',
                             params=self._get_request_params(),
                             headers=request_header,
                             timeout=60)

    def waitForSuccess(self, stack, timeoutInSeconds):
        '''
        Wait until the stack job (deploy or destroy) finishes successfully
        '''

        sleep_time = POLL_INTERVAL
        count = 0

        while True:
            self.logger.info("Waiting for job of stack id %s" % stack['id'])
            response = self.retrieve(stack)
            if response.status_code == 401:
                self.logger.warning(
                    'Authentication error\nstatus code is %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s' % (
                        response.status_code, response.headers, response.content, response.request.url,
                        response.request.headers, response.request.body))
                self._authenticate()
                continue
            if response.status_code != 200:
                self.logger.error(
                    "Invalid response status code %s\nresponse:\n%s\n\nrequest url:\n%s" %
                    (response.status_code, response.content, response.request.url))

            if response.json()['status'] == "SUCCESS":
                self.logger.info("Job for stack id %s finished successfully." % stack['id'])
                return response.json()
            elif response.json()['status'] != "IN_PROGRESS":
                self.logger.info(json.dumps(response.json(), indent=4, separators=(',',': ')))
                raise Exception("Job for stack id %s failed with status %s" % (stack['id'], response.json()['status']))

            count += 1
            if count > (timeoutInSeconds / sleep_time):
                raise Exception("The job for stack %s did not complete in %s." % (stack['id'], timeoutInSeconds))
            #
            # sleep before polling the job result
            #
            time.sleep(sleep_time)

    def get_cloud_connection(self, template_provider, retry=True):
        '''
        Retrieves the cloud connection based on the template provider
        '''
        cloud_connection_name = None
        provider = template_provider.lower()
        if provider in ('amazon ec2', 'amazonec2', 'aws'):
            cloud_connection_name = os.environ['AWS_CLOUD_CONNECTION']
        elif provider in ('ibm cloud', 'ibmcloud'):
            cloud_connection_name = os.environ['IBMCLOUD_CLOUD_CONNECTION']
        elif provider in ('vsphere', 'vmware'):
            cloud_connection_name = os.environ['VMWARE_CLOUD_CONNECTION']
        else:
            raise Exception('Invalid template provider %s' % template_provider)

        request_header = self._get_request_header()
        response = requests.get(env.IAAS_HOST + '/cloudconnections',
                                params=self._get_request_params(),
                                headers=request_header,
                                timeout=60)
        if response.status_code == 401:
            self.handleAuthError(response, retry)
            return self.get_cloud_connection(template_provider, retry=False)
        if response.status_code != 200:
            raise Exception(
                "Failed to retrieve the cloud connections, status code %s\nresponse headers:\n%s\nresponse:\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s"
                % (response.status_code, response.headers, response.content, response.request.url,
                   response.request.headers, response.request.body))

        try:
            cloud_connections = response.json()
        except ValueError:
            raise Exception(
                "Failed to parse JSON\nstatus code%s\nresponse headers:\n%s\nresponse\n%s\n\nrequest url:\n%s\nrequest headers:\n%s\nrequest body:\n%s" %
                (response.status_code, response.headers, response.content, response.request.url,
                 response.request.headers, response.request.body))
        for cloud_connection in cloud_connections:
            if cloud_connection['name'] == cloud_connection_name:
                return cloud_connection

        raise Exception('Cloud connection %s does not exist' %
                        cloud_connection_name)