Example no. 1
import argparse
import json
import os
import shutil

# Toolbox imports
from helpers import utils, tf_helpers


def main():
    parser = argparse.ArgumentParser(description='Train a neural imaging pipeline')
    parser.add_argument('--cam', dest='camera', action='store', default='D90',
                        help='camera name, default: D90')
    parser.add_argument('--dir', dest='root_dir', action='store', default='/tmp/neural-imaging',
                        help='output directory for temporary results, default: /tmp/neural-imaging')
    parser.add_argument('--verbose', dest='verbose', action='store_true', default=False,
                        help='print the output of tested tools, default: false')
    parser.add_argument('--keep', dest='keep', action='store_true', default=False,
                        help='do not remove the test root directory')
    parser.add_argument('--tests', dest='tests', action='store', default=None,
                        help='list of tests to run')

    args = parser.parse_args()

    utils.setup_logging()
    tf_helpers.disable_warnings()
    tf_helpers.print_versions()

    # Load the test-suite configuration
    with open('config/tests/framework.json') as f:
        settings = json.load(f)

    # Start from a clean output directory unless --keep was given
    if os.path.exists(args.root_dir) and not args.keep:
        print('\n> deleting {}'.format(args.root_dir))
        shutil.rmtree(args.root_dir)

    if not os.path.exists(args.root_dir):
        os.makedirs(args.root_dir)

    # Run the full test list unless a comma-separated subset was given
    if args.tests is None:
        tests = ['train-nip', 'resume-nip', 'train-manipulation', 'train-dcn', 'train-manipulation-dcn']
    else:
        tests = args.tests.split(',')

    for test in tests:
        run_test(test, settings[test], args)
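run_test itself is not shown in this snippet. A minimal hypothetical stub, assuming nothing beyond the test names and the settings[test] lookup visible above, could look like this:

def run_test(test, settings, args):
    # Hypothetical sketch: the real implementation runs the named test
    # (e.g. 'train-nip') with its settings from config/tests/framework.json,
    # writing temporary results under args.root_dir.
    print('> running {}'.format(test))
    if args.verbose:
        print(settings)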
Example no. 2
    def setUp(self):
        self.config = get_config_and_rules(True)

        log_file_path = os.path.join(self.config.logs_directory_path,
                                     self.config.tests_log_filename
                                     ) if self.config.log_to_file else None
        self.logger = setup_logging(__file__, log_file_path, logging.DEBUG)

        # Open the Excel test workbook and grab the interface sheet
        self.workbook = xlwings.Book(
            os.path.join(self.config.data_directory_path,
                         self.config.excel_test_workbook_filename))
        self.interface_sheet = getitem(self.workbook.sheets,
                                       self.config.interface_sheet)
        self.empty_list = list()
        self.empty_dict = dict()
        self.empty_dataframe = pandas.DataFrame(self.empty_dict)
        self.default_fixture_input = os.path.join(
            self.config.fixtures_directory_path,
            self.config.default_fixture_filename)

        with open(self.default_fixture_input) as f:
            self.default_model_inputs = json_load_byteified(f)

        # Derive the expected-output fixture path by swapping the
        # input/output filename appenders
        fixture_filename, file_extension = os.path.splitext(
            self.default_fixture_input)
        fixture_filename = fixture_filename.replace(
            self.config.in_filename_appender,
            self.config.out_filename_appender,
        )
        fixture_out_filepath = os.path.join(
            self.config.fixtures_directory_path, ''.join(
                (fixture_filename, file_extension)))

        with open(fixture_out_filepath) as f:
            self.projection_outputs = json.load(f)
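The expected outputs are thus located purely by filename convention: the extension is split off the input fixture path and the in/out appenders are swapped. With hypothetical settings in_filename_appender='_in' and out_filename_appender='_out', a default fixture model_in.json would be paired with model_out.json in the same fixtures directory.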
Example no. 3
def main():
    # ConfigParser is the Python 2 module name (configparser on Python 3)
    config = ConfigParser.ConfigParser()
    config.read('config/model_config.config')
    app_log = setup_logging('gl_gc_logger', config.get('logger', 'log_file_name'))
    app_log.info('Scoring DUNS number: %d' % user_duns_number)

    # Assemble the model inputs from the user-supplied values
    sic_data = get_sic_data(user_sic_code, config.get('data_files', 'sic_data'))
    model_inputs = dict()
    model_inputs['division'] = user_division
    model_inputs['exposure_size'] = user_exposure_size / 1e6  # exposure in millions
    model_inputs['exposure_type'] = user_exposure_type
    model_inputs['predom_state'] = user_predominant_state
    model_inputs['sic_class'] = sic_data['SIC_Class']
    model_inputs['zero_loss_ind'] = 1 if sum(user_claims_history) == 0 else 0
    model_inputs['zip_density'] = get_zip_density(user_zip_code, config.get('data_files', 'easi_data'))
    model_inputs['avg_claim_count'] = float(sum(user_claims_history)) / len(user_claims_history)
    predicted_loss, _ = run_model(model_inputs,
                                  config.get('data_files',
                                             'model_coefficients_file'),
                                  # rules are eval'd directly from the config file
                                  eval(config.get('model_rules', 'rules')))
    division_factors = get_division_factors(user_division, config.get('data_files', 'division_factors'))
    ilf_factors = get_ilf_factors(user_retention_amount, user_occurence_limit,
                                  config.getint('constants', 'ilf_loss_cap'),
                                  config.get('data_files', 'sic_data'))
    # Combine the predicted loss with the rating factors; numeric values
    # stored in the config must be read with getfloat/getint, not get
    midpoint = (predicted_loss
                * config.getfloat('constants', 'loss_development_factor')
                * (ilf_factors['occurence_limit'] - ilf_factors['retention_amount'])
                * config.getint('constants', 'aggregate_limit')
                * division_factors['off_balance_factor']
                * (1 + division_factors['rate_need'])
                / ilf_factors['loss_cap'] * (1 - config.getfloat('constants', 'expense_rate'))
                )
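The config.get/getint/getfloat calls above imply a model_config.config laid out roughly as follows; the section and key names come from the code, while every value is a hypothetical placeholder:

[logger]
log_file_name = gl_gc.log

[data_files]
sic_data = data/sic_data.csv
easi_data = data/easi_data.csv
model_coefficients_file = data/model_coefficients.csv
division_factors = data/division_factors.csv

[model_rules]
rules = {'example_rule': 1}

[constants]
ilf_loss_cap = 1000000
loss_development_factor = 1.05
aggregate_limit = 2000000
expense_rate = 0.30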
Example no. 4
    def test_print_level(self):
        log = setup_logging('log2', 'test-log2.log', print_level=True)
        self.assertTrue(os.path.isfile('test-log2.log'))
        log.info('Test')
        with open('test-log2.log') as fh:
            log_lines = fh.readlines()
        self.assertIn('Test', log_lines[0])
        self.assertIn('INFO', log_lines[0])
Example no. 5
    def test_vanilla(self):
        log = setup_logging('log1', 'test-log1.log')
        self.assertTrue(os.path.isfile('test-log1.log'))
        log.info('Test')
        with open('test-log1.log') as fh:
            log_lines = fh.readlines()
        self.assertIn('Test', log_lines[0])
        self.assertNotIn('INFO', log_lines[0])
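Taken together, test_vanilla and test_print_level pin down the observable contract of this setup_logging: it returns a named logger that writes to the given file and includes the level name in each record only when print_level=True. A minimal sketch that satisfies those assertions, not necessarily the project's actual implementation:

import logging

def setup_logging(name, log_file, print_level=False):
    # Sketch only: include the level name in the format string
    # only when print_level is requested
    fmt = '%(levelname)s %(message)s' if print_level else '%(message)s'
    logger = logging.getLogger(name)
    logger.setLevel(logging.INFO)
    handler = logging.FileHandler(log_file)  # creates the file immediately
    handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(handler)
    return logger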
Example no. 6
    setup_logging
)
try:
    import ConfigParser as configparser  # Python 2
except ImportError:
    import configparser  # Python 3
import logging
import pandas
import numpy
import yaml
import json
import math
import os


logger = setup_logging(__file__, None, logging.DEBUG)


def calculator(coefficient, eazi_dataframe, density_constant):
    """ Creates and returns a function to calculate the model feature from
    the input coefficient

    :param coefficient: coefficient that needs to be calculated
    :param eazi_dataframe: eazi csv loaded into pandas
    :param density_constant: density constant used for log density calculation
    :type coefficient: str
    :type eazi_dataframe: pandas.DataFrame
    :type density_constant: int
    :returns: a callable function to execute the coefficient's calculations
    :rtype: function
    """
Example no. 7
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import argparse
import matplotlib.pyplot as plt
from collections import OrderedDict

# Toolbox imports
from helpers import utils, plots, tf_helpers
from compression.ratedistortion import plot_bulk

utils.setup_logging()
plots.configure('tex')
tf_helpers.disable_warnings()


def main():
    parser = argparse.ArgumentParser(
        description='Compare rate-distortion profiles for various codecs')
    parser.add_argument(
        '-d',
        '--data',
        dest='data',
        action='store',
        default='./data/rgb/clic512',
        help='directory with training & validation images (png)')
    parser.add_argument('-i',
                        '--images',
                        dest='images',
                        action='append',
                        default=[],