Example #1
    def __init__(self, client=None, config=None):
        """
        Initialize ZTVApp instance.

        Args:
        ----
            client (:obj:`zenpy.Zenpy`): The Zendesk API client
        """
        self.config = config or configargparse.Namespace()
        self.screen = urwid.raw_display.Screen()
        self.frame = AppFrame(
            client=client, title=u"Zendesk Ticket Viewer", loop=self,
        )
        self.frame.add_page('WELCOME', WelcomePage)
        self.frame.add_page('TICKET_LIST', TicketListPage)
        self.frame.add_page('TICKET_VIEW', TicketViewPage)
        self.frame.add_page('ERROR', ErrorPage)
        self.frame.set_page('WELCOME')
        if getattr(self.config, 'unpickle_tickets', False):
            # no creds are required when unpickle_tickets is set, so bypass login
            self.frame.pages['WELCOME']._action_login()
            del self.frame.pages['WELCOME']

        self.__super.__init__(
            widget=self.frame, palette=self.palette, screen=self.screen,
        )
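
A minimal usage sketch, assuming ZTVApp subclasses urwid.MainLoop (suggested by the __init__ above); the Zenpy credentials are placeholders:

import configargparse
from zenpy import Zenpy

config = configargparse.Namespace(unpickle_tickets=False)
client = Zenpy(subdomain='example', email='agent@example.com', password='hunter2')
app = ZTVApp(client=client, config=config)
app.run()  # run() comes from urwid.MainLoop (assumed base class)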
Example #2
def _create_task(arg_config, session):
    solvers = arg_config.solvers
    solvers_args = arg_config.solvers_args
    assert len(solvers) == len(solvers_args),\
        "The number of solver argument sets must match the number of solvers"
    for solver in solvers:
        assert solver in ALL_SOLVER,\
            f"Solver {solver} not found! Please make sure that all solvers are properly named."
    task = Task(task_type="instance_test", status=Task.STATUS_OPTIONS.CREATED, name=arg_config.name)
    config = ConfigHolder.fromNamespace(arg_config, task=task,
                                        ignored_attributes=["url_path", "solvers", "solvers_args", "create_only", "config", "name"])
    jobs = _get_instances(task, config, session)
    for solver, solver_args in zip(solvers, solvers_args):
        subtask = Task(parent=task, name=f"{solver}_test", task_type="instance_test", status=Task.STATUS_OPTIONS.CREATED)
        task.children.append(subtask)
        subconfig_namespace = configargparse.Namespace(solver=solver,
                                                       solver_args=solver_args)
        subconfig = ConfigHolder.fromNamespace(subconfig_namespace, task=subtask)
        add_prev_job = (subconfig.with_start_sol is not None and subconfig.with_start_sol)
        if jobs and isinstance(jobs[0], TaskJobs):
            for task_job in jobs:
                prev_job = task_job if add_prev_job else None
                for _ in range(config.repetitions):
                    subtask.jobs.append(TaskJobs(task=subtask, graph=task_job.graph, prev_job=prev_job))
        else:
            for graph in jobs:
                for _ in range(config.repetitions):
                    subtask.jobs.append(TaskJobs(task=subtask, graph=graph))
    session.add(task)
    session.commit()
    return task, config
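
A hedged invocation sketch; the SQLAlchemy session setup, solver name, and extra attributes (e.g. repetitions, read later through the ConfigHolder) are illustrative assumptions:

from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker

session = sessionmaker(bind=create_engine('sqlite:///tasks.db'))()
arg_config = configargparse.Namespace(
    name='demo_run',
    solvers=['greedy'],          # each name must appear in ALL_SOLVER
    solvers_args=['--depth 2'],  # one argument string per solver
    url_path='sqlite:///tasks.db',
    create_only=True,
    config=None,
    repetitions=3,
)
task, config = _create_task(arg_config, session)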
Example #3
    def test_get_client_mocked(self):
        config = configargparse.Namespace(subdomain=self.dummy_subdomain,
                                          email=self.dummy_email,
                                          password=self.dummy_password)
        api = get_client(config)
        self.assertEqual(api.tickets.subdomain, config.subdomain)
        self.assertEqual(api.tickets.session.auth,
                         (config.email, config.password))
Example #4
def _save_task_file(savepath, config, task):
    # Parse the task id and database URL into a namespace, then persist it.
    parser = configargparse.Parser()
    parser.add_argument("--task")
    parser.add_argument("--database")
    parsed = parser.parse_args(
        args=[f"--task={task.id}", f"--database={config.url_path}"])
    parser.write_config_file(parsed, [savepath])
Example #5
class TestBase(unittest.TestCase):
    """
    Base test case containing useful
    """
    dummy_subdomain = 'foo.com'
    dummy_email = '*****@*****.**'
    dummy_password = '******'
    config = configargparse.Namespace(subdomain=dummy_subdomain,
                                      email=dummy_email,
                                      password=dummy_password,
                                      unpickle_tickets=True,
                                      pickle_path=os.path.join(
                                          TEST_DATA_DIR, 'tickets.pkl'))
Example #6
    def write_config_file(self):
        # TODO https://github.com/bw2/ConfigArgParse/issues/95
        not_saveable = {"gtd_capture_tags",
                        "config"}.intersection(self._args.__dict__.keys())
        if not_saveable:
            logging.warning("options ({}) cannot be saved".format(', '.join(
                map(lambda s: s.replace('_', '-'), not_saveable))))
        args = configargparse.Namespace(**{
            k: v
            for k, v in self._args.__dict__.items() if k not in not_saveable
        })

        # TODO https://github.com/bw2/ConfigArgParse/issues/127
        with utils.suppress_stdout():
            self._parser.write_config_file(args, [self.config_path])
Example #7
    def mock_validate_connection(self, subdomain, status_code, session_mock):
        """
        Validate cinnection using a session with a mocked status_code.

        Args:
            subdomain (str): The subdomain which is being tested
            status_code (int): the status code which is always returned by
                the mocked session
            session_mock : Provided by the mock.patch decorator
        """
        mock_response = requests.Response()
        mock_response.status_code = status_code
        session_mock.return_value = mock.MagicMock(get=mock.MagicMock(
            return_value=mock_response))
        session = requests.Session()
        config = configargparse.Namespace()
        config.subdomain = subdomain
        validate_connection(config, session)
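
The docstring notes that session_mock is supplied by mock.patch; a hedged sketch of the assumed decoration and call:

@mock.patch('requests.Session')
def mock_validate_connection(self, subdomain, status_code, session_mock):
    ...

# A test would then call it without the final argument; the patch decorator
# injects the mock as the last positional parameter:
# self.mock_validate_connection('obscura', 200)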
Example #8
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os

import configargparse as argparse

# `options` is the py-skygear options read from argv or os.environ.
options = argparse.Namespace()


def add_app_arguments(ap: argparse.ArgumentParser):
    ap.add_argument('--apikey',
                    metavar='APIKEY',
                    action='store',
                    default=None,
                    help="API Key of the application",
                    env_var='API_KEY')
    ap.add_argument('--masterkey',
                    metavar='MASTERKEY',
                    action='store',
                    default=None,
                    help="Master Key of the application",
                    env_var='MASTER_KEY')
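
A hedged sketch of populating the shared `options` namespace (remember that `argparse` here is configargparse):

ap = argparse.ArgumentParser()
add_app_arguments(ap)
ap.parse_args(namespace=options)  # fills the module-level `options` in place
print(options.apikey, options.masterkey)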
Example #9
"""
Provides configuration file and command line argument parsing functionality to all modules.

Parameters can be decentrally defined in any module by getting the global parser via get_settings_parser
and registering them by add_argument(). parse_settings() needs to be called one in the main program, then
all parameters are available under the global Namespace settings.

For the usage of config files see the documentation of configargparse or call the program with -h.
"""

import pickle
import numpy as np
import random
import configargparse

settings = configargparse.Namespace()  # global Namespace with all settings
unknown_args = []  # global list with all unknown parameters
_parser = None  # single global settings parser


def get_settings_parser():
    """Returns the single global argument parser for adding parameters.

    Parameters can be added in all modules by add_argument.
    After calling parse() once in the main program, all settings
    are available in the global settings dictionary.
    """
    global _parser
    if not _parser:
        _parser = configargparse.ArgParser(  # default_config_files=["default.cfg"],
            formatter_class=configargparse.ArgumentDefaultsRawHelpFormatter)
    return _parser
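
A hedged sketch of the parse_settings() companion the module docstring mentions; the real signature and any seeding behavior may differ:

def parse_settings():
    """Parse all registered arguments into the global `settings` namespace."""
    global unknown_args
    parser = get_settings_parser()
    # parse_known_args fills `settings` in place and returns leftover args
    _, unknown_args = parser.parse_known_args(namespace=settings)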
Example #10
    def __call__(self, parser, namespace, values, option_string=None):
        # Split a dotted dest like "group.option" into its two parts.
        group, dest = self.dest.split('.', 1)
        # Store the value on a nested Namespace attached under the group name.
        groupspace = getattr(namespace, group, configargparse.Namespace())
        setattr(groupspace, dest, values)
        setattr(namespace, group, groupspace)
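
A hedged wiring sketch; GroupAction is a hypothetical name for the Action subclass this __call__ belongs to:

import argparse
import configargparse

class GroupAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
        group, dest = self.dest.split('.', 1)
        groupspace = getattr(namespace, group, configargparse.Namespace())
        setattr(groupspace, dest, values)
        setattr(namespace, group, groupspace)

parser = configargparse.ArgParser()
parser.add_argument('--db-host', dest='db.host', action=GroupAction)
ns = parser.parse_args(['--db-host', 'localhost'])
assert ns.db.host == 'localhost'  # value lands on the nested namespace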
Example #11
def parse_args(args):
    """Parse command line parameters

    Args:
      args ([str]): command line parameters as list of strings

    Returns:
      :obj:`argparse.Namespace`: command line parameters namespace
    """
    # Paths are assumed to be relative to the root of the
    # expanding-sdo-capabilities git repo.
    p = configargparse.ArgParser(config_file_parser_class=YAMLConfigFileParser,
                                 default_config_files=['./configs/*.conf'],
                                 description='Training/testing pipeline')

    # Arguments that are common across all subprojects.
    p.add_argument('--version',
                   action='version',
                   version='sdo {ver}'.format(ver=__version__))
    p.add_argument('-v',
                   '--verbose',
                   dest="loglevel",
                   help="set loglevel to INFO",
                   default=logging.INFO,
                   action='store_const',
                   const=logging.INFO)
    p.add_argument('-vv',
                   '--very-verbose',
                   dest="loglevel",
                   help="set loglevel to DEBUG",
                   action='store_const',
                   const=logging.DEBUG)
    p.add_argument(
        '--log-minimal',
        dest='log_minimal',
        type=str2bool,
        nargs='?',
        const=True,
        default=False,
        help=
        "If true, log lines will have no prefix; if false, extensive prefix logging will appear"
    )
    p.add_argument(
        '-c',
        '--config',
        is_config_file=True,
        help=
        'Config file path with YAML values instead of command line switches')
    p.add_argument(
        '-p',
        '--pipeline-name',
        dest='pipeline_name',
        type=str,
        required=True,
        help=
        'Which pipeline to use: only AutocalibrationPipeline implemented in this version'
    )
    p.add_argument(
        '--experiment-name',
        dest='experiment_name',
        type=str,
        required=True,
        help='The name of this experiment, used to partition result artifacts')
    p.add_argument(
        '--model-version',
        dest='model_version',
        type=int,
        required=True,
        help=
        'Which version of the model for your particular pipeline you want to run'
    )
    p.add_argument(
        '--results-path',
        dest='results_path',
        required=True,
        help='Where to store generated logs, models, etc.',
    )
    p.add_argument('--num-epochs',
                   dest='num_epochs',
                   type=int,
                   default=5,
                   help='Number of training epochs')
    p.add_argument('--batch-size-train',
                   dest='batch_size_train',
                   type=int,
                   default=64,
                   help='Batch size for training')
    p.add_argument('--batch-size-test',
                   dest='batch_size_test',
                   type=int,
                   default=100,
                   help='Batch size for testing')
    p.add_argument('--data-basedir',
                   dest='data_basedir',
                   type=str,
                   default=DATA_BASEDIR,
                   help='Path to load training/testing data')
    p.add_argument('--data-inventory',
                   dest='data_inventory',
                   type=str,
                   default=DATA_INVENTORY,
                   help='Path to a pre-computed inventory file that contains '
                   'a dataframe of existing files. If False (or not valid) '
                   'the file search is done by folder and is much slower')
    p.add_argument('--test-ratio',
                   dest='test_ratio',
                   type=float,
                   default=0.3,
                   help='What percentage of the data to retain for testing')
    p.add_argument(
        '--save-interval',
        dest='save_interval',
        type=int,
        default=50,
        help=
        'Every save-interval epochs we will save the trained model and optimizer state'
    )
    p.add_argument(
        '--add-metrics-interval',
        dest='additional_metrics_interval',
        type=int,
        default=5,
        help=
        'Every additional_metrics_interval epochs we will save the additional metrics'
    )
    p.add_argument(
        '--log-interval',
        dest='log_interval',
        type=int,
        default=10,
        help=
        'While processing batches during training, how often to print out log statistics'
    )
    p.add_argument(
        '--wavelengths',
        dest='wavelengths',
        nargs='+',
        default=['0094', '0131'],
        help='Wavelengths to use for input; Ex: --wavelengths 0094 0131')
    p.add_argument(
        '--instruments',
        dest='instruments',
        nargs='+',
        default=['AIA', 'AIA'],
        help=
        'For each input wavelength, the instrument to use for its data; Ex: --instruments AIA AIA'
    )
    p.add_argument(
        '--actual-resolution',
        dest='actual_resolution',
        type=int,
        default=512,
        help=
        'Actual pixel resolution of training/testing images before subsampling is applied'
    )
    p.add_argument(
        '--subsample',
        dest='subsample',
        type=int,
        default=4,
        help=
        'Indicates what to reduce images by, against --actual-resolution. Ex: 512/4 = 128'
    )
    p.add_argument(
        '--cuda-device',
        dest='cuda_device',
        type=int,
        default=None,
        help=
        'CUDA GPU device number to use; if not provided a random CUDA GPU on the system will be used'
    )
    p.add_argument(
        '--random-seed',
        dest='random_seed',
        type=int,
        default=1,
        help='Random seed to use for initializing sources of randomness')
    p.add_argument(
        '--deterministic-cuda',
        dest='deterministic_cuda',
        type=str2bool,
        nargs='?',
        const=True,
        default=True,
        help=
        'Whether to force CUDA to be deterministic; can cause some perf slowdown'
    )
    p.add_argument('--continue-training',
                   dest='continue_training',
                   type=str2bool,
                   nargs='?',
                   const=True,
                   default=False,
                   help='Whether to continue training from a saved checkpoint')
    p.add_argument(
        '--saved-model-path',
        dest='saved_model_path',
        help='Absolute path to a saved model to continue training from')
    p.add_argument(
        '--saved-optimizer-path',
        dest='saved_optimizer_path',
        help='Absolute path to a saved optimizer to continue training from')
    p.add_argument('--start-epoch-at',
                   dest='start_epoch_at',
                   type=int,
                   default=1,
                   help='When restarting training, the epoch to start at')
    p.add_argument(
        '--yr-range',
        nargs='+',
        type=int,
        default=[],
        help=
        'Start and stop year range, inclusive at both ends. Ex: --yr-range 2012 2013'
    )
    p.add_argument(
        '--mnt-step',
        dest='mnt_step',
        type=int,
        default=1,
        help=
        'Month frequency, starting from January. Values must be 1 or greater.')
    p.add_argument(
        '--day-step',
        dest='day_step',
        type=int,
        default=1,
        help='Day frequency starting from 1. Values must be 1 or greater.')
    p.add_argument(
        '--h-step',
        dest='h_step',
        type=int,
        default=6,
        help='Hourly frequency starting from 0. Values must be 1 or greater.')
    p.add_argument(
        '--min-step',
        dest='min_step',
        type=int,
        default=60,
        help='Minute frequency starting from 0. Values must be 1 or greater.')
    p.add_argument(
        '--dataloader-workers',
        dest='dataloader_workers',
        type=int,
        # TODO: We should default this to the num of vCores - 1.
        default=6,
        help=
        'The number of workers to use when preparing data for feeding into the deep net'
    )
    p.add_argument(
        '--scaling',
        dest='scaling',
        type=str2bool,
        nargs='?',
        const=True,
        default=True,
        help=
        'If True, scaling of the images by the mean of the channel is applied. '
        'Look at the values inside sdo_dataset.py for more detail.')
    p.add_argument(
        '--apodize',
        dest='apodize',
        type=str2bool,
        nargs='?',
        const=True,
        default=False,
        help='If True, set off limb values of images to zero. Default is False'
    )
    p.add_argument(
        '--optimizer-weight-decay',
        type=float,
        default=0,
        help=
        "The weight decay to use for whatever optimizer might be used; the current default matches Torch's Adam default"
    )
    p.add_argument(
        '--optimizer-lr',
        type=float,
        default=1e-3,
        help=
        "The learning rate to use for whatever optimizer might be used; the current default matches Torch's Adam default"
    )

    # Parse arguments that are specific to each of the sub-projects.
    parse_autocal_args(p)

    args = vars(p.parse_args(args))

    # Make downstream processing easier by expanding paths.
    args['results_path'] = os.path.abspath(
        os.path.join(args['results_path'], args['experiment_name']))

    if args['continue_training']:
        if not args['saved_model_path'] or not args['saved_optimizer_path']:
            raise ValueError(
                'To continue training you must provide --saved-model-path '
                'and --saved-optimizer-path (and usually --start-epoch-at)')
        args['saved_model_path'] = os.path.abspath(args['saved_model_path'])
        args['saved_optimizer_path'] = os.path.abspath(
            args['saved_optimizer_path'])

    args['scaled_width'] = int(args['actual_resolution'] / args['subsample'])
    args['scaled_height'] = args['scaled_width']

    # The logger is not setup yet here, as we need the configargs themselves
    # to configure it, so we just print to standard out these details.
    print('\nParsed configuration:\n\n{}'.format(pprint.pformat(args,
                                                                indent=2)))
    return configargparse.Namespace(**args)
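
A hedged usage sketch passing only the required flags defined above (parse_autocal_args may require more):

config = parse_args([
    '--pipeline-name', 'AutocalibrationPipeline',
    '--experiment-name', 'exp1',
    '--model-version', '1',
    '--results-path', './results',
])
print(config.scaled_width)  # 512 / 4 = 128 with the defaults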