Esempio n. 1
0
    # how many parallel process do we want to spawn?
    pn = parser.add_argument(
        '-pn',
        '--process-number',
        type=int,
        default=4,
        help=
        'number of concurrent processes to spawn; WARNING: an high number can trigger google '
        'serving limits!')

    args = parser.parse_args()

    # sanity check on process numbers
    if args.process_number <= 0:
        sys.tracebacklimit = 0
        raise argparse.ArgumentError(pn, 'vaule must be bigger than zero!')

    # sanity check on client secret
    if not os.path.isfile(args.client_secret[0]):
        sys.tracebacklimit = 0
        raise argparse.ArgumentError(cs, 'file not found')

    # queues to manage the parallel processing of domains
    manager = multiprocessing.Manager()
    tasks = multiprocessing.JoinableQueue()
    domains_info = manager.list()

    # we want to be sure that domains are initialized, even if empty
    domains = []

    # if d parameter is present we use it otherwise we check for a csv file
Esempio n. 2
0
 def __call__(self, parser, namespace, values, option_string=None):
     """Store *values* on the namespace, rejecting empty values.

     Raises:
         argparse.ArgumentError: if *values* is falsy (e.g. an empty
             string or list), attributed to this action.
     """
     if not values:
         # Fixed typo in the user-facing message ("by" -> "be").
         raise argparse.ArgumentError(self, 'must not be empty.')
     setattr(namespace, self.dest, values)
Esempio n. 3
0
 def _check_value(self, action, value):
     """Validate *value* against the action's declared choices.

     Does nothing when no choices are declared or the value is among
     them; otherwise raises argparse.ArgumentError for this action.
     """
     choices = action.choices
     if choices is None:
         return
     if value in choices:
         return
     raise argparse.ArgumentError(action, f"invalid choice: '{value}'")
Esempio n. 4
0
def action_type(value):
    """argparse ``type=`` callable: normalize and validate an action name.

    Lower-cases *value* and checks it against the module-level
    VALID_ACTIONS collection.

    Returns:
        The lower-cased action name.

    Raises:
        argparse.ArgumentTypeError: if the value is not a known action.
            The original raised ``argparse.ArgumentError()`` with no
            arguments, which itself fails with TypeError because
            ArgumentError requires (argument, message); ArgumentTypeError
            is the exception argparse expects from type callables and is
            reported as a clean usage error.
    """
    value = value.lower()
    if value not in VALID_ACTIONS:
        raise argparse.ArgumentTypeError('invalid action: {!r}'.format(value))
    return value
Esempio n. 5
0
 def _raise(x):
     """Abort argument parsing by raising ArgumentError with message *x*.

     The first argument to ArgumentError is None because the error is
     not attributed to any particular argparse action.
     """
     raise argparse.ArgumentError(None, x)
Esempio n. 6
0
def _wait_sequential(process):
    """Wait for one subprocess; translate Ctrl-C into a warning.

    Returns True when the wait was interrupted and the caller should
    stop launching further ranks.
    """
    try:
        process.wait()
        return False
    except KeyboardInterrupt:
        print(
            '\tPlease double press Ctrl-C within 1 second to kill job.'
            'It will take several seconds to shutdown ...',
            flush=True)
        return True


def _launch_ranks(opt, wizard_dir, trg_dir, command):
    """Spawn one subprocess per rank under *wizard_dir* and wait for them.

    For each of ``opt.hsize`` ranks a random id is drawn, a per-rank log
    directory is created, and ``cd_and_execute`` is started there.  In
    sequential mode we wait after each launch; in parallel mode we launch
    everything first and wait at the end, killing all children on Ctrl-C.
    """
    processes = []
    for _ in range(opt.hsize):
        # NOTE(review): rank ids are random draws, not 0..hsize-1, so
        # collisions are possible — this matches the original behavior.
        hrank = random.randint(0, 1000000)
        log_dir = wizard_dir / '{:d}'.format(hrank)
        log_dir.mkdir(parents=True, exist_ok=True)
        wizard = '{}/{:d}'.format(opt.name, hrank)
        process = cd_and_execute(log_dir, trg_dir, command, wizard, hrank)
        processes.append(process)
        if not opt.parallel and _wait_sequential(process):
            break
    if opt.parallel:
        try:
            for process in processes:
                process.wait()
        except KeyboardInterrupt:
            for process in processes:
                process.kill()


def run():
    """Create (or resume) a wizard run directory and launch its ranks.

    Parses CLI options, validates the directory layout, then either
    resumes an existing wizard (re-reading its recorded command) or
    snapshots the current source tree into a fresh wizard directory and
    runs the user-supplied command there.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('name')
    parser.add_argument('--resume', '-r', action='store_true', default=False)
    parser.add_argument('--force', '-f', action='store_true', default=False)
    parser.add_argument('--command', '-c')
    parser.add_argument('--hsize', '-s', type=int, default=1)
    parser.add_argument('--parallel', '-p', action='store_true', default=False)
    opt = parser.parse_args()

    # Explicit validation instead of ``assert`` (asserts are stripped
    # under ``python -O``).
    if opt.hsize <= 0:
        parser.error('world size smaller than 1!')
    os.environ['hsize'] = str(opt.hsize)

    hogwarts_file = find_hogwarts(True)
    house_file = find_house('', True)

    src_dir = Path.cwd()
    if src_dir == hogwarts_file.parent:
        fail('curr directory contains .hogwarts.')
    if str(house_file).startswith(str(src_dir)):
        fail('curr directory contains {}.'.format(
            house_file.relative_to(src_dir)))

    wizard_dir = house_file.parent / opt.name
    force, resume = opt.force, opt.resume
    # Interactive conflict resolution when the wizard directory exists.
    while wizard_dir.is_dir():
        if force:
            shutil.rmtree(str(wizard_dir))
            break
        elif resume:
            break
        else:
            print('wizard {} already exist at {}, '
                  'overwrite/resume/break? [Y/r/n] '.format(
                      repr(opt.name), wizard_dir),
                  end='',
                  flush=True)
            choice = input().strip().casefold()
            if choice == 'y':
                force = True
            elif choice == 'r':
                resume = True
            elif choice == 'n':
                sys.exit()

    random.seed(42)
    if resume:
        # Re-read the command recorded when the wizard was first created.
        wizard_file = find_wizard(opt.name, True)
        runway_info = yaml_load(wizard_file)
        trg_dir = wizard_file.parent / runway_info['trg_dir_from_wizard']
        _launch_ranks(opt, wizard_dir, trg_dir, runway_info['sub_command'])
    else:
        if opt.command is None:
            raise argparse.ArgumentError(None, 'command required')
        wizard_dir.mkdir(parents=True, exist_ok=True)
        wizard_file = wizard_dir / '.wizard'
        trg_dir = wizard_dir / src_dir.name
        runway_info = {
            'date': time.strftime('%Y-%m-%d-%H:%M:%S'),
            'src_dir_from_hogwarts':
            str(src_dir.relative_to(hogwarts_file.parent)),
            'trg_dir_from_wizard': src_dir.name,
            'sub_command': opt.command,
            'full_command': get_full_command(sys.argv),
        }
        yaml_dump(runway_info, wizard_file)
        shutil.copytree(str(src_dir), str(wizard_dir / src_dir.name))
        _launch_ranks(opt, wizard_dir, trg_dir, opt.command)
Esempio n. 7
0
def KeyValueArgument(value):
    """argparse ``type=`` callable parsing ``<key>=<value>`` strings.

    Splits on the first '=' only, so the value part may itself contain
    '=' characters.

    Returns:
        (key, value) tuple of strings.

    Raises:
        argparse.ArgumentTypeError: if *value* contains no '='.  The
            original raised ``argparse.ArgumentError`` with a single
            argument, which itself fails with TypeError since
            ArgumentError requires (argument, message); ArgumentTypeError
            is the exception argparse expects from type callables.
    """
    try:
        k, v = value.split('=', 1)
        return k, v
    except ValueError as err:
        # Also fixed the message grammar ("must of" -> "must be of").
        raise argparse.ArgumentTypeError(
            'values must be of the form <key>=<value>') from err
Esempio n. 8
0
 def _check_value(self, action, value):
     """Customized replacement for the default choice validation.

     Raises argparse.ArgumentError with a shorter message when choices
     are declared and the converted *value* is not among them.
     """
     if action.choices is None:
         return
     if value not in action.choices:
         raise argparse.ArgumentError(
             action, 'invalid choice: {}'.format(value))
Esempio n. 9
0
def main():
    """Entry point: select exactly one motion source and run detection.

    Sources: a video file (-v/--video), an online stream (-s/--stream)
    or the local webcam (-c/--camera).  Supplying more than one source,
    or a video path that does not exist, raises argparse.ArgumentError.
    """
    parser = argparse.ArgumentParser(
        description=
        "This program tracks the motion on chosen piece of video (live webcam , online stream , video file)",
        usage=
        "usage: some_sort_of_console_interface_i_guess.py [-h] [-v VIDEO_PATH] [-a MIN_AREA] [-s STREAM_URL] [-d] [-c]"
    )
    parser.add_argument("-v",
                        "--video",
                        type=str,
                        help="tracks motion on video file")
    parser.add_argument(
        "-a",
        "--min_area",
        type=int,
        default=500,
        help=
        "minimum area size in pixels which can be considered as motion (default is 500)"
    )
    parser.add_argument("-s",
                        "--stream",
                        type=str,
                        help="track motion on online stream")
    parser.add_argument(
        "-d",
        "--debug",
        action='store_true',
        help=
        "opens motion tracker in debug mode which shows different stages of tracking motion"
    )
    parser.add_argument("-c",
                        "--camera",
                        action='store_true',
                        help="track motion on your webcam")
    arguments = vars(parser.parse_args())

    if arguments.get("video") is not None:
        # BUG FIX: the original looked up the misspelled key "steam", which is
        # always None, so combining --video with --stream went undetected.
        # ArgumentError also requires (argument, message); the original passed
        # a single string, which raised TypeError instead of a usage error.
        if arguments.get(
                "stream") is not None or arguments.get("camera") is True:
            raise argparse.ArgumentError(None, "More than one source was given")
        if os.path.isfile(arguments.get("video")) is False:
            raise argparse.ArgumentError(None, "Source file was not found")
            # From a file
        print("video with debug magic happens")
        stream = cv2.VideoCapture(arguments.get("video"))
        detect(stream, "video", arguments.get("min_area"),
               arguments.get("debug"))

    if arguments.get("camera") is True:
        if arguments.get("stream") is not None or arguments.get(
                "video") is not None:
            raise argparse.ArgumentError(None, "More than one source was given")
        stream = VideoStream(src=0).start()
        detect(stream, "camera", arguments.get("min_area"),
               arguments.get("debug"))

    if arguments.get("stream") is not None:
        if arguments.get(
                "video") is not None or arguments.get("camera") is True:
            raise argparse.ArgumentError(None, "More than one source was given")
        # TODO: validate that the URL is well-formed before opening it.

        stream = cv2.VideoCapture(arguments.get("stream"))
        detect(stream, "stream", arguments.get("min_area"),
               arguments.get("debug"))
Esempio n. 10
0
def parse_args(args):
    """Parse CLI arguments for ``python -m can.viewer``.

    Returns:
        (parsed_args, can_filters, data_structs) where ``can_filters``
        is a list of ``{'can_id': ..., 'can_mask': ...}`` dicts built
        from the -f/--filter specs, and ``data_structs`` maps hex frame
        IDs to ``struct.Struct`` unpackers, optionally bundled with
        scaling factors taken from the -d/--decode specs.
    """
    # Python versions >= 3.5
    kwargs = {}
    if sys.version_info[0] * 10 + sys.version_info[1] >= 35:  # pragma: no cover
        kwargs = {'allow_abbrev': False}

    # Parse command line arguments
    parser = argparse.ArgumentParser(
        'python -m can.viewer',
        description=
        'A simple CAN viewer terminal application written in Python',
        epilog='R|Shortcuts: '
        '\n        +---------+-------------------------+'
        '\n        |   Key   |       Description       |'
        '\n        +---------+-------------------------+'
        '\n        | ESQ/q   | Exit the viewer         |'
        '\n        | c       | Clear the stored frames |'
        '\n        | s       | Sort the stored frames  |'
        '\n        | SPACE   | Pause the viewer        |'
        '\n        | UP/DOWN | Scroll the viewer       |'
        '\n        +---------+-------------------------+',
        add_help=False,
        **kwargs)

    optional = parser.add_argument_group('Optional arguments')

    optional.add_argument('-h',
                          '--help',
                          action='help',
                          help='Show this help message and exit')

    optional.add_argument(
        '--version',
        action='version',
        help="Show program's version number and exit",
        version='%(prog)s (version {version})'.format(version=__version__))

    # Copied from: https://github.com/hardbyte/python-can/blob/develop/can/logger.py
    optional.add_argument(
        '-b',
        '--bitrate',
        type=int,
        help='''Bitrate to use for the given CAN interface''')

    optional.add_argument(
        '-c',
        '--channel',
        help='''Most backend interfaces require some sort of channel.
                          For example with the serial interface the channel might be a rfcomm device: "/dev/rfcomm0"
                          with the socketcan interfaces valid channel examples include: "can0", "vcan0".
                          (default: use default for the specified interface)'''
    )

    optional.add_argument(
        '-d',
        '--decode',
        dest='decode',
        help='R|Specify how to convert the raw bytes into real values.'
        '\nThe ID of the frame is given as the first argument and the format as the second.'
        '\nThe Python struct package is used to unpack the received data'
        '\nwhere the format characters have the following meaning:'
        '\n      < = little-endian, > = big-endian'
        '\n      x = pad byte'
        '\n      c = char'
        '\n      ? = bool'
        '\n      b = int8_t, B = uint8_t'
        '\n      h = int16, H = uint16'
        '\n      l = int32_t, L = uint32_t'
        '\n      q = int64_t, Q = uint64_t'
        '\n      f = float (32-bits), d = double (64-bits)'
        '\nFx to convert six bytes with ID 0x100 into uint8_t, uint16 and uint32_t:'
        '\n  $ python -m can.viewer -d "100:<BHL"'
        '\nNote that the IDs are always interpreted as hex values.'
        '\nAn optional conversion from integers to real units can be given'
        '\nas additional arguments. In order to convert from raw integer'
        '\nvalues the values are divided with the corresponding scaling value,'
        '\nsimilarly the values are multiplied by the scaling value in order'
        '\nto convert from real units to raw integer values.'
        '\nFx lets say the uint8_t needs no conversion, but the uint16 and the uint32_t'
        '\nneeds to be divided by 10 and 100 respectively:'
        '\n  $ python -m can.viewer -d "101:<BHL:1:10.0:100.0"'
        '\nBe aware that integer division is performed if the scaling value is an integer.'
        '\nMultiple arguments are separated by spaces:'
        '\n  $ python -m can.viewer -d "100:<BHL" "101:<BHL:1:10.0:100.0"'
        '\nAlternatively a file containing the conversion strings separated by new lines'
        '\ncan be given as input:'
        '\n  $ cat file.txt'
        '\n      100:<BHL'
        '\n      101:<BHL:1:10.0:100.0'
        '\n  $ python -m can.viewer -d file.txt',
        metavar=
        '{<id>:<format>,<id>:<format>:<scaling1>:...:<scalingN>,file.txt}',
        nargs=argparse.ONE_OR_MORE,
        default='')

    optional.add_argument(
        '-f',
        '--filter',
        help='R|Space separated CAN filters for the given CAN interface:'
        '\n      <can_id>:<can_mask> (matches when <received_can_id> & mask == can_id & mask)'
        '\n      <can_id>~<can_mask> (matches when <received_can_id> & mask != can_id & mask)'
        '\nFx to show only frames with ID 0x100 to 0x103 and 0x200 to 0x20F:'
        '\n      python -m can.viewer -f 100:7FC 200:7F0'
        '\nNote that the ID and mask are alway interpreted as hex values',
        metavar='{<can_id>:<can_mask>,<can_id>~<can_mask>}',
        nargs=argparse.ONE_OR_MORE,
        default='')

    optional.add_argument('-i',
                          '--interface',
                          dest='interface',
                          help='R|Specify the backend CAN interface to use.',
                          choices=sorted(can.VALID_INTERFACES))

    # Print help message when no arguments are given
    if len(args) == 0:
        parser.print_help(sys.stderr)
        import errno
        raise SystemExit(errno.EINVAL)

    parsed_args = parser.parse_args(args)

    # Convert the -f/--filter specs into python-can filter dictionaries.
    # ':' separates a plain id/mask pair; '~' marks an inverting filter.
    can_filters = []
    if len(parsed_args.filter) > 0:
        # print('Adding filter/s', parsed_args.filter)
        for flt in parsed_args.filter:
            # print(filter)
            if ':' in flt:
                _ = flt.split(':')
                can_id, can_mask = int(_[0], base=16), int(_[1], base=16)
            elif '~' in flt:
                can_id, can_mask = flt.split('~')
                can_id = int(can_id, base=16) | 0x20000000  # CAN_INV_FILTER
                # NOTE(review): '& 0x20000000' keeps only the error-flag bit of
                # the mask and discards the user-supplied mask bits — verify
                # against upstream python-can, which retains the full mask.
                can_mask = int(can_mask,
                               base=16) & 0x20000000  # socket.CAN_ERR_FLAG
            else:
                raise argparse.ArgumentError(None, 'Invalid filter argument')
            can_filters.append({'can_id': can_id, 'can_mask': can_mask})

    # Dictionary used to convert between Python values and C structs represented as Python strings.
    # If the value is 'None' then the message does not contain any data package.
    #
    # The struct package is used to unpack the received data.
    # Note the data is assumed to be in little-endian byte order.
    # < = little-endian, > = big-endian
    # x = pad byte
    # c = char
    # ? = bool
    # b = int8_t, B = uint8_t
    # h = int16, H = uint16
    # l = int32_t, L = uint32_t
    # q = int64_t, Q = uint64_t
    # f = float (32-bits), d = double (64-bits)
    #
    # An optional conversion from real units to integers can be given as additional arguments.
    # In order to convert from raw integer value the real units are multiplied with the values and similarly the values
    # are divided by the value in order to convert from real units to raw integer values.
    data_structs = {
    }  # type: Dict[Union[int, Tuple[int, ...]], Union[struct.Struct, Tuple, None]]
    if len(parsed_args.decode) > 0:
        # Decode specs may come from a file (one per line) or directly
        # from the command line; each spec is '<id>:<fmt>[:scale...]'.
        if os.path.isfile(parsed_args.decode[0]):
            with open(parsed_args.decode[0], 'r') as f:
                structs = f.readlines()
        else:
            structs = parsed_args.decode

        for s in structs:
            tmp = s.rstrip('\n').split(':')

            # The ID is given as a hex value, the format needs no conversion
            key, fmt = int(tmp[0], base=16), tmp[1]

            # The scaling
            scaling = []  # type: list
            for t in tmp[2:]:
                # First try to convert to int, if that fails, then convert to a float
                try:
                    scaling.append(int(t))
                except ValueError:
                    scaling.append(float(t))

            if scaling:
                data_structs[key] = (struct.Struct(fmt), ) + tuple(scaling)
            else:
                data_structs[key] = struct.Struct(fmt)
            # print(data_structs[key])

    return parsed_args, can_filters, data_structs
Esempio n. 11
0
 def __call__(self, parser, namespace, value, option_string=None):
     """Store *value* on the namespace when it lies in [self.min, self.max].

     Raises argparse.ArgumentError (attributed to this action) when the
     value falls outside the allowed range.
     """
     in_range = self.min <= value <= self.max
     if not in_range:
         raise argparse.ArgumentError(
             self, 'invalid choice: %r (choose from [%d-%d])' %
             (value, self.min, self.max))
     setattr(namespace, self.dest, value)
Esempio n. 12
0
def ParseArgs():
    """Parse command-line arguments for the stale-expectation finder.

    Returns:
        The parsed argparse namespace.  ``expectation_file`` is filled
        in automatically from the suite name when neither
        --expectation-file nor --test was given.

    Raises:
        argparse.ArgumentError: if --remove-stale-expectations is used
            without an expectation file.
    """
    parser = argparse.ArgumentParser(
        description=('Script for finding cases of stale expectations that can '
                     'be removed/modified.'))
    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument(
        '--expectation-file',
        help='A path to an expectation file to read from. If not specified and '
        '--test is not used, will automatically determine based off the '
        'provided suite.')
    input_group.add_argument(
        '--test',
        action='append',
        dest='tests',
        default=[],
        help='The name of a test to check for unexpected passes. Can be passed '
        'multiple times to specify multiple tests. Will be treated as if it was '
        'expected to be flaky on all configurations.')
    parser.add_argument(
        '--suite',
        required=True,
        # Could probably autogenerate this list using the same
        # method as Telemetry's run_browser_tests.py once there is no need to
        # distinguish WebGL 1 from WebGL 2.
        choices=[
            'context_lost',
            'depth_capture',
            'hardware_accelerated_feature',
            'gpu_process',
            'info_collection',
            'maps',
            'pixel',
            'power',
            'screenshot_sync',
            'trace_test',
            'webgl_conformance1',
            'webgl_conformance2',
        ],
        help='The test suite being checked.')
    parser.add_argument(
        '--project',
        required=True,
        help='The billing project to use for BigQuery queries. '
        'Must have access to the ResultDB BQ tables, e.g. '
        '"luci-resultdb.chromium.gpu_ci_test_results".')
    parser.add_argument('--num-samples',
                        type=int,
                        default=100,
                        help='The number of recent builds to query.')
    parser.add_argument('--output-format',
                        choices=[
                            'html',
                            'print',
                        ],
                        default='html',
                        help='How to output script results.')
    parser.add_argument('--remove-stale-expectations',
                        action='store_true',
                        default=False,
                        help='Automatically remove any expectations that are '
                        'determined to be stale from the expectation file.')
    parser.add_argument(
        '-v',
        '--verbose',
        action='count',
        default=0,
        help='Increase logging verbosity, can be passed multiple '
        'times.')
    parser.add_argument('-q',
                        '--quiet',
                        action='store_true',
                        default=False,
                        help='Disable logging for non-errors.')

    args = parser.parse_args()
    if args.quiet:
        args.verbose = -1
    SetLoggingVerbosity(args.verbose)

    # Default the expectation file from the suite name when no explicit
    # input was provided.
    if not (args.tests or args.expectation_file):
        args.expectation_file = os.path.join(
            os.path.dirname(__file__), 'gpu_tests', 'test_expectations',
            '%s_expectations.txt' %
            SUITE_TO_EXPECTATIONS_MAP.get(args.suite, args.suite))

    if args.remove_stale_expectations and not args.expectation_file:
        # BUG FIX: ArgumentError's first parameter must be an argparse action
        # (or None), not a string — the original passed the flag name, which
        # made the exception constructor itself crash with AttributeError.
        # The flag name is folded into the message instead.
        raise argparse.ArgumentError(
            None,
            '--remove-stale-expectations can only be used with expectation '
            'files')

    return args
Esempio n. 13
0
 def get_value(self, name):
     try:
         return self.enum[name]
     except KeyError:
         raise argparse.ArgumentError(self,
                                      'invalid value: {}'.format(name))
Esempio n. 14
0
        help=
        ('specify directory to store data, ' +
         ' must start with "sim/MODEL_...", see possible values for MODEL below '
         ))
    aa('--show', action='store_true', help='show plots')
    aa('--verbosity',
       type=int,
       default=0,
       help='specify integer > 0 to get more output [default 0]')
    args = p.parse_args()

    # run checks on output directory
    dir = Path(args.dir)
    if not dir.resolve().parent.name == 'sim':
        raise argparse.ArgumentError(
            dir_arg,
            "The parent directory of the --dir argument needs to be named 'sim'"
        )
    else:
        model = dir.name.split('_')[0]
        settings.m(0, f'...model is: {model!r}')
    if dir.is_dir() and 'test' not in str(dir):
        message = (f'directory {dir} already exists, '
                   'remove it and continue? [y/n, press enter]')
        if str(input(message)) != 'y':
            settings.m(0, '    ...quit program execution')
            sys.exit()
        else:
            settings.m(0, '   ...removing directory and continuing...')
            shutil.rmtree(dir)

    settings.m(0, model)
 def OnSecondArgumentRaiseError(self):
   """Report a duplicate occurrence of this argument.

   Always raises argparse.ArgumentError attributed to this action, with
   a message built by the module's _GenerateErrorMessage helper.
   """
   message = '"{0}" argument cannot be specified multiple times'.format(
       self.dest)
   raise argparse.ArgumentError(self, _GenerateErrorMessage(message))
Esempio n. 16
0
 def __call__(self, value):
     """Validate a session name, which may be a filesystem path or a plain name.

     Anything containing os.path.sep is treated as a path and accepted
     as-is; bare names must match VALID_SESSION_NAME_PATTERN, otherwise
     argparse.ArgumentError is raised with self.error_message.
     """
     looks_like_path = os.path.sep in value
     if not looks_like_path and VALID_SESSION_NAME_PATTERN.search(value) is None:
         raise argparse.ArgumentError(None, self.error_message)
     return value
Esempio n. 17
0
 def HeaderMap(arg):
     """argparse ``type=`` callable parsing HTTP-style ``NAME=VALUE`` pairs.

     Splits on the first '=' only, so the value part may itself contain
     '=' characters (the original split on every '=' and crashed with
     "too many values to unpack" on e.g. ``NAME=a=b``).

     Returns:
         (name, value) tuple of strings.

     Raises:
         argparse.ArgumentTypeError: if *arg* is not a string containing
             '='.  The original raised argparse.ArgumentError with a
             single argument, which itself fails with TypeError because
             ArgumentError requires (argument, message); ArgumentTypeError
             is the exception argparse expects from type callables.
     """
     if not isinstance(arg, str) or '=' not in arg:
         raise argparse.ArgumentTypeError(
             'header value must be in the form NAME=VALUE')
     name, value = arg.split('=', 1)
     return (name, value)
Esempio n. 18
0
def main():
    """Train a LightGBM model on Dask with walk-forward validation.

    Parses CLI arguments, validates the train/validation window
    arithmetic, configures logging (locally or on an EC2 instance),
    fits the model, saves it, and uploads the log file to S3.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "n_workers",
        metavar="<n_workers>",
        help="number of Dask workers",
        type=int,
    )

    parser.add_argument(
        "s3_path",
        metavar="<s3_path>",
        help=
        "path to S3 folder containing PCA-transformed data in Parquet dataset format",
    )

    parser.add_argument(
        "startmonth",
        metavar="<startmonth>",
        help="first (earliest) month of data to be used, format: YY-MM",
        type=valid_date,
    )

    parser.add_argument(
        "n_months_in_first_train_set",
        metavar="<n_months_in_first_train_set>",
        help=
        "number of months to be used in first train set during walk-forward validation",
        type=int,
    )

    parser.add_argument(
        "n_months_in_val_set",
        metavar="<n_months_in_val_set>",
        help=
        "number of months to be used in each validation set during walk-forward validation",
        type=int,
    )

    parser.add_argument(
        "--frac",
        "-f",
        help="fraction of rows to sample (default is 1.0 if omitted)",
        default="1.0",
        type=valid_frac,
    )

    args = parser.parse_args()

    # Validate that the requested walk-forward windows fit in the data.
    # BUG FIX: ArgumentError requires (argument, message); the original
    # passed the message alone, which raised TypeError instead of the
    # intended usage error.
    if month_counter(
            args.startmonth) - args.n_months_in_first_train_set + 1 <= 0:
        raise argparse.ArgumentError(
            None,
            "The provided combination of start month and number of months in "
            "first train set is invalid - either not enough months exist to "
            "allow for the provided length of train period, or no months "
            "remain for any validation period.")
    elif (month_counter(args.startmonth) - args.n_months_in_first_train_set +
          1) < args.n_months_in_val_set:
        raise argparse.ArgumentError(
            None,
            "The provided combination of start month and number of months in "
            "first train set does not allow for the provided number of months "
            "in validation set.")

    fmt = "%(name)-12s : %(asctime)s %(levelname)-8s %(lineno)-7d %(message)s"
    datefmt = "%Y-%m-%d %H:%M:%S"
    log_dir = Path.cwd().joinpath("logs")
    path = Path(log_dir)
    path.mkdir(exist_ok=True)
    curr_dt_time = datetime.now().strftime("%Y_%m_%d_%H_%M")
    log_fname = f"logging_{curr_dt_time}_lightgbm.log"
    log_path = log_dir.joinpath(log_fname)

    model_dir = Path.cwd()
    model_fname = f"lgbr_model_{curr_dt_time}.txt"
    model_path = model_dir.joinpath(model_fname)

    logging.basicConfig(
        level=logging.DEBUG,
        filemode="w",
        format=fmt,
        datefmt=datefmt,
        filename=log_path,
    )

    # statements to suppress irrelevant logging by boto3-related libraries
    logging.getLogger("boto3").setLevel(logging.CRITICAL)
    logging.getLogger("botocore").setLevel(logging.CRITICAL)
    logging.getLogger("s3transfer").setLevel(logging.CRITICAL)
    logging.getLogger("urllib3").setLevel(logging.CRITICAL)

    # statements to suppress some of the logging messages from dask
    # more info here: https://docs.dask.org/en/latest/debugging.html
    logging.getLogger("dask").setLevel(logging.WARNING)
    logging.getLogger("distributed").setLevel(logging.WARNING)

    # also suppress s3fs messages
    logging.getLogger("s3fs").setLevel(logging.WARNING)
    logging.getLogger("fsspec").setLevel(logging.WARNING)

    # Check if code is being run on EC2 instance (vs locally).
    # BUG FIX: default to "" so the membership test cannot crash with
    # TypeError when the USER environment variable is unset.
    my_user = os.environ.get("USER", "")
    is_aws = "ec2" in my_user
    # Log EC2 instance name and type metadata
    if is_aws:
        instance_metadata = dict()
        instance_metadata["EC2 instance ID"] = ec2_metadata.instance_id
        instance_metadata["EC2 instance type"] = ec2_metadata.instance_type
        instance_metadata[
            "EC2 instance public hostname"] = ec2_metadata.public_hostname

        # Render "key: value" lines, indented to align with the logging
        # prefix width (55 characters).
        r = [": ".join(item) for item in instance_metadata.items()]
        nl = "\n" + " " * 55
        logging.info(
            f"Script is running on EC2 instance with the following metadata: "
            f"{nl}{nl.join(r)}")
    else:
        logging.info(
            "Script is running on local machine, not on EC2 instance.")

    logging.info(f"The Python version is {platform.python_version()}.")
    logging.info(f"The pandas version is {pd.__version__}.")
    logging.info(f"The Dask version is {dsk.__version__}.")
    logging.info(f"The LightGBM version is {lgb.__version__}.")

    s3_client = boto3.client("s3")

    logging.info(
        f"Running LightGBM model with n_workers: {args.n_workers}, s3_path: {args.s3_path}, "
        f"startmonth: {args.startmonth}, n_months_in_first_train_set: {args.n_months_in_first_train_set}, "
        f"n_months_in_val_set: {args.n_months_in_val_set}, and frac: {args.frac}..."
    )

    model = LightGBMDaskLocal(
        curr_dt_time,
        args.n_workers,
        args.s3_path,
        args.startmonth,
        args.n_months_in_first_train_set,
        args.n_months_in_val_set,
        frac=args.frac,
    )
    model.gridsearch_wfv(params)
    model.refit_and_save(model_path)

    # copy log file to S3 bucket (best effort: a failed upload is logged,
    # not fatal)
    try:
        s3_client.upload_file(f"./logs/{log_fname}", "my-ec2-logs",
                              log_fname)
    except ClientError:
        logging.exception("Log file was not copied to S3.")
Esempio n. 19
0
    def parse_known_args(self, args=None, namespace=None):
        """
        Parse and validate args.

        After the standard ArgumentParser pass, this method (1) derives a
        ``verbosity`` level from the parsed flags, (2) wraps the
        module-level logging helpers (crit/error/warn/info/debug/log) so
        they honour that verbosity and attach caller locals/globals to
        error records, and (3) dispatches the remaining args to a
        sub-parser chosen by ``namespace.action``.

        Returns:
            (namespace, remaining_args); ``namespace`` is ``None`` when
            sub-parser setup failed.
        """
        global crit, error, warn, info, debug, log
        # bring in the global variables containing the logger's methods
        def _export_namespace_dict(func):
            # Decorator: attach the original caller's locals/globals to the
            # log record via the ``extra`` kwarg (used for crit/error).
            def _get_locals():
                try:
                    locals_result = {}
                    frame = inspect.currentframe()
                    frame = frame.f_back
                    # from _get_locals() to _export_namespace_dict()
                    frame = frame.f_back
                    # from _export_namespace_dict() to __wrap_log_fnc()
                    frame = frame.f_back
                    # from __wrap_log_fnc() to the function where crit or error was actually called.
                    locals_result = frame.f_locals
                finally:
                    del frame
                    # NOTE(review): returning from ``finally`` suppresses any
                    # exception raised in the ``try`` body — confirm this
                    # best-effort behavior is intentional.
                    return locals_result
            def __wrapped_fnc(*args,**kwargs):
                # print("__wrapped_fnc:{}".format(func))
                # print("__wrapped_fnc:{}".format(kwargs))
                # NOTE(review): ``foo`` below is never used.
                foo = None
                if "extra" not in kwargs:
                    kwargs["extra"] = {}
                locals_result = {}
                globals_result = {}
                try:
                    frame = inspect.currentframe()
                    # frame = frame.f_back
                    # from __wrapped_fnc() to _export_namespace_dict()
                    frame = frame.f_back
                    # from _export_namespace_dict() to __wrap_log_fnc()
                    frame = frame.f_back
                    # from __wrap_log_fnc() to the function where crit or error was actually called.
                    locals_result = frame.f_locals
                    globals_result = frame.f_globals
                except Exception as err:
                    debug("Failed to grab locals and globals:{}".format(str(err)))
                finally:
                    del frame
                kwargs["extra"]["locals"] = locals_result
                kwargs["extra"]["globals"] = globals_result
                # print("__wrapped_fnc:{}".format(kwargs))
                return func(*args,**kwargs)
            return __wrapped_fnc
        def _wrap_log_fnc(func,lvl,n_args=1):
            """
            Accept a log event emitting function,
            and set it to not do anything if it's
            verbosity is not met in the current namespace.
            """
            # print("Wrapping {}\n\t{}".format(func,trace_fnc_path()))
            def __wrap_log_fnc(*args,**kwargs):
                """
                Accept args and kwargs, do a little preprocessing,
                and determine if its allowed to be passed to the 
                logger method. 
                """
                # print(namespace.verbosity, args[0])
                # For n_args > 1 (i.e. Logger.log) the first positional arg is
                # the level, so the verbosity gate reads it from args[0].
                if (namespace.verbosity <= lvl and n_args == 1) or (n_args > 1 and namespace.verbosity <= args[0]):
                    if args and len(args) > 0:
                        if(n_args<1):
                            # if the wrapped method accepts no arg
                            # shouldn't happen
                            return func(**kwargs)
                        if(n_args==1):
                            # if the wrapped method only accepts 1 arg
                            return func(stringify(args),**kwargs)
                        else:
                            # print(namespace.verbosity, lvl, n_args, args[0])
                            # if the wrapped method accepts many arg
                            first_set = args[0:n_args-1]
                            second_set = args[n_args-1:]
                            # print("Logger.log was explicitly called")
                            args = tuple([a for a in first_set]+[stringify(second_set)])
                            if len(args) == 1:
                                args = args[0]
                            return func(*args,**kwargs)
                    else:
                        return func(*args,**kwargs)
                else:
                    return
            return __wrap_log_fnc
        if args is not None and len(args) > 0:
            namespace, args = super(MainArgumentParser, self).parse_known_args(args=args)
        else:
            namespace, args = super(MainArgumentParser, self).parse_known_args()
        # doing everything the ArgumentParser class would do,
        # and get the namespace it returns
        if namespace.verbose:
            # if logging everything,
            namespace.verbosity = 1
            # set verbosity/logging level to 1
        if not namespace.verbosity:
            # if no verbosity is set yet,
            namespace.verbosity = 60
            # set it to be quiet
        logr.setLevel(namespace.verbosity)
        # set logging level
        if crit == getattr(logr,"critical"):
            # if we haven't wrapped the functions yet
            crit  = _export_namespace_dict(crit)
            error = _export_namespace_dict(error)
            # setup error and crit to automatically grab locals and globals from the stack they're called in
            crit  = _wrap_log_fnc(crit, 50)
            error = _wrap_log_fnc(error,40)
            warn  = _wrap_log_fnc(warn, 30)
            info  = _wrap_log_fnc(info, 20)
            debug = _wrap_log_fnc(debug,10)
            log = _wrap_log_fnc(log, 0, n_args=2)
            # crit is now _wrap_log_fnc() with the the 
            # Logger method "critical" called inside it.
        try:
            # write any special checks or attributes needed for namespace
            if namespace.action == "play":
                parser = SnakeGameArgumentParser()
            elif namespace.action == "test":
                parser = TestingArgumentParser()
                namespace._test_parser = parser
            else:
                # NOTE(review): ArgumentError expects an argparse action (or
                # None) as its first argument; passing the string "action"
                # makes the constructor itself raise AttributeError, which is
                # then swallowed by the ``except`` below — the reported error
                # is misleading. Consider ArgumentError(None, ...).
                raise argparse.ArgumentError("action", "invalid action: please choose one of the listed keywords.")
            if args:
                namespace, args = parser.parse_known_args(args=args,namespace=namespace)
                # namespace = parser.parse_args(args=args)
            else:
                namespace, args = parser.parse_known_args(namespace=namespace)
                # namespace = parser.parse_args()

        except Exception as err:
            # Any failure (bad action, sub-parser error) is reported via the
            # wrapped crit logger and signalled to the caller as namespace=None.
            crit("Failed to setup namespace.", extra={ "err": err, "tb": get_tb()})
            namespace = None
        return namespace, args
def check_args(args):
    """Validate the parsed command-line namespace.

    At least one run mode must be requested.

    Args:
        args: argparse.Namespace carrying boolean attributes
            ``do_train``, ``do_eval`` and ``do_test``.

    Raises:
        argparse.ArgumentError: if none of the three mode flags is set.
    """
    if not (args.do_train or args.do_eval or args.do_test):
        # ArgumentError requires (argument, message); the original
        # single-argument call raised TypeError instead of the intended
        # error.  Pass None since no single argparse action is at fault.
        raise argparse.ArgumentError(
            None,
            'You should pass at least one argument for --do_train or --do_eval or --do_test')
Esempio n. 21
0
def extant_file(x):
    """argparse ``type=`` callable requiring *x* to be an existing path.

    Args:
        x: path string taken from the command line.

    Returns:
        The unchanged path string when the path exists.

    Raises:
        argparse.ArgumentTypeError: if the path does not exist.
    """
    if not os.path.exists(x):
        # ArgumentTypeError is the documented exception for type
        # callables; the original single-argument ArgumentError call
        # raised TypeError instead of reporting the missing file.
        raise argparse.ArgumentTypeError("{0} does not exist".format(x))
    return x
Esempio n. 22
0
def main():
    """Sync MultiAccess members against makeradmin.se.

    Parses command-line options, fetches the member list (from the API or
    a local file), connects to the MultiAccess SQL database and runs the
    sync loop repeatedly, prompting the user between runs.
    """

    basicConfig(
        format=
        '%(asctime)s %(levelname)s [%(process)d/%(threadName)s %(pathname)s:%(lineno)d]: %(message)s',
        stream=sys.stderr,
        level=INFO)

    parser = argparse.ArgumentParser(
        "Fetches member list from makeradmin.se or local file, then prompt user with"
        f" changes to make. Built from source revision {source_revision}.",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        "-d",
        "--db",
        default=
        'mssql://(local)\\SQLEXPRESS/MultiAccess?trusted_connection=yes&driver=SQL+Server',
        help="SQL Alchemy db engine spec.")
    parser.add_argument(
        "-w",
        "--what",
        default=",".join(WHAT_ALL),
        help=f"What to update, comma separated list."
        f" '{WHAT_ORDERS}' tell maker admin to perform order actions before updating members."
        f" '{WHAT_UPDATE}' will update end times and rfid_tag."
        f" '{WHAT_ADD}' will add members in MultiAccess."
        f" '{WHAT_BLOCK}' will block members that should not have access.")
    parser.add_argument(
        "-u",
        "--maker-admin-base-url",
        default='https://api.makeradmin.se',
        help="Base url of maker admin (for login and fetching of member info)."
    )
    parser.add_argument(
        "-m",
        "--members-filename",
        default=None,
        help=
        "Provide members in a file instead of fetching from maker admin (same format as response"
        " from maker admin).")
    parser.add_argument(
        "-t",
        "--token",
        default="",
        help="Provide token on command line instead of prompting for login.")
    parser.add_argument(
        "--customer-id",
        default=16,
        type=int,
        help="MultiAccess customer primary key to use to get and add users.")
    parser.add_argument(
        "--authority-id",
        default=23,
        type=int,
        help="MultiAccess authority primary key to add by default to new users."
    )
    parser.add_argument(
        "--ignore-running",
        action='store_true',
        help="Ignore the check for if MultiAccess is running, do not use this.")

    args = parser.parse_args()

    # Validate --what early so a typo fails before any network/db work.
    what = args.what.split(',')
    for w in what:
        if w not in WHAT_ALL:
            # ArgumentError requires (argument, message); the original
            # single-argument call raised TypeError instead of the
            # intended error.
            raise argparse.ArgumentError(None,
                                         f"Unknown argument '{w}' to what.")

    with Tui() as ui:
        client = MakerAdminClient(ui=ui,
                                  base_url=args.maker_admin_base_url,
                                  members_filename=args.members_filename,
                                  token=args.token)

        ui.info__progress(f"connecting to {args.db}")
        engine = create_engine(args.db)

        # Each pass gets a fresh session so a previous run's state
        # cannot leak into the next one.
        while True:
            Session = sessionmaker(bind=engine)
            session = Session()

            sync(session=session,
                 ui=ui,
                 client=client,
                 customer_id=args.customer_id,
                 authority_id=args.authority_id,
                 ignore_running=args.ignore_running,
                 what=what)

            session.close()

            # Blocks until the user asks for another pass (or quits).
            ui.prompt__run_again()
Esempio n. 23
0
def validate_principal(ns):
    """Ensure exactly one principal selector was supplied.

    Exactly one of ``object_id``, ``spn`` or ``upn`` must be truthy on
    the namespace; otherwise ``argparse.ArgumentError`` is raised.
    """
    provided = [value for value in (ns.object_id, ns.spn, ns.upn) if value]
    if len(provided) == 1:
        return
    raise argparse.ArgumentError(
        None, 'specify exactly one: --object-id, --spn, --upn')
Esempio n. 24
0
def valid_range(s):
    """argparse ``type=`` callable parsing a "lo,hi" range string.

    Args:
        s: comma-separated string holding at most two integer fields.

    Returns:
        Tuple of ints parsed from the fields (one or two elements).

    Raises:
        argparse.ArgumentTypeError: when more than two fields are given
            or a field is not a valid integer.
    """
    parts = s.split(',')
    if len(parts) > 2:
        # ArgumentTypeError is the exception argparse expects from type
        # callables; the original single-argument ArgumentError call
        # raised TypeError instead of reporting the bad range.
        raise argparse.ArgumentTypeError(
            "The given range is invalid, please use ?,? format.")
    try:
        return tuple(int(i) for i in parts)
    except ValueError as err:
        # Non-integer fields get the same user-facing message, with the
        # original cause chained for debugging.
        raise argparse.ArgumentTypeError(
            "The given range is invalid, please use ?,? format.") from err
Esempio n. 25
0
    # Pairwise clustering evaluation: precision over predicted clusters,
    # recall over ground-truth label groups.
    correct_pairs = 0
    total_pairs = 0
    # Precision
    for cluster in clusters:
        # count_correct_pairs presumably returns (correct, total) pair
        # counts within this predicted cluster -- TODO confirm against
        # its definition (not visible here).
        cp, tp = count_correct_pairs(cluster, labels_lookup)
        correct_pairs += cp
        total_pairs += tp
    # Recall:
    gt_clusters = defaultdict(list)
    # Count the actual number of possible true pairs:
    for row_no, label in labels_lookup.items():
        gt_clusters[label].append(row_no)
    true_pairs = 0
    for cluster_id, cluster_items in gt_clusters.items():
        n = len(cluster_items)
        # n-choose-2 pairs within each ground-truth group
        true_pairs += n * (n-1)/2.0
    print("Correct Pairs that are in the same cluster:{}".format(correct_pairs))
    print("Total pairs as per the clusters created: {}".format(total_pairs))
    print("Total possible true pairs:{}".format(true_pairs))
    # NOTE(review): divides raise ZeroDivisionError when total_pairs or
    # true_pairs is 0 (e.g. empty input) -- confirm callers guarantee
    # non-trivial clusterings.
    precision = float(correct_pairs)/total_pairs
    recall = float(correct_pairs)/true_pairs
    return precision, recall


if __name__ == '__main__':
    # Build the CLI parser.  The original instantiated
    # argparse.ArgumentError() -- an exception type that takes two
    # required constructor arguments and has no add_argument method --
    # which raised TypeError immediately; ArgumentParser is the
    # intended class.
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--clusters', help='List of lists where each \
                        list is a cluster')
    parser.add_argument('-l', '--labels', help='List of labels associated \
                        with each vector.')
Esempio n. 26
0
    def _validate_args(self):
        """Reject a non-positive ``keep_days`` setting.

        Raises ``argparse.ArgumentError`` when ``self.args.keep_days``
        is provided but smaller than 1; a value of ``None`` (unset) is
        accepted.
        """
        keep_days = self.args.keep_days
        if keep_days is None or keep_days >= 1:
            return
        raise argparse.ArgumentError(
            None, "keep_days must be greater than 0. ")
Esempio n. 27
0
 def error(self, msg, *args):
     """Format *msg* with printf-style *args* and raise it as an
     argparse.ArgumentError.

     NOTE(review): ArgumentError's first parameter is normally an
     argparse Action (or None); passing ``self`` assumes the enclosing
     class exposes Action-like attributes (``option_strings``/``dest``)
     -- confirm against the class definition, which is not visible here.
     """
     msg %= args
     raise argparse.ArgumentError(self, msg)
Esempio n. 28
0
 def int_dec_or_hex(num):
     """argparse ``type=`` callable converting *num* via ``str_to_int``.

     Decimal vs. hexadecimal handling is delegated to ``str_to_int``
     (defined elsewhere in this project).

     Raises:
         argparse.ArgumentTypeError: when the value cannot be converted.
     """
     try:
         return str_to_int(num)
     except Exception as err:
         # ArgumentTypeError is the exception argparse expects from type
         # callables; the original single-argument ArgumentError call
         # was itself a TypeError.  The except stays broad because
         # str_to_int's failure modes are unknown here, but the cause is
         # chained for debuggability instead of being swallowed by a
         # bare except.
         msg = 'Cannot convert a parameter to a number'
         raise argparse.ArgumentTypeError(msg) from err
Esempio n. 29
0
def load_dataset(
    metadata: Path,
    lexicon: t.Optional[str],
    cognate_lexicon: t.Optional[str] = None,
    status_update: t.Optional[str] = None,
    logger: logging.Logger = cli.logger,
):
    """Import wordlist and/or cognate-set Excel workbooks into a CLDF dataset.

    Args:
        metadata: path to the CLDF metadata JSON file.
        lexicon: path of the wordlist Excel workbook, or None/empty to skip.
        cognate_lexicon: path of the cognate-sets Excel workbook, or None
            to skip.
        status_update: when given, a Status_Column is added to the
            affected table(s) and passed through to the cell parsers.
        logger: logger used for fallback warnings.

    Raises:
        argparse.ArgumentError: if neither lexicon nor cognate_lexicon
            is given.
    """
    # logging.basicConfig(filename="warnings.log")
    dataset = pycldf.Dataset.from_metadata(metadata)
    # load dialect from metadata
    try:
        dialect = argparse.Namespace(
            **dataset.tablegroup.common_props["special:fromexcel"])
    except KeyError:
        # No "special:fromexcel" section: fall back to default parsers below.
        dialect = None

    if not lexicon and not cognate_lexicon:
        raise argparse.ArgumentError(
            None,
            "At least one of WORDLIST and COGNATESETS excel files must be specified",
        )
    if lexicon:
        # load dialect from metadata
        if dialect:
            try:
                EP = excel_parser_from_dialect(dataset, dialect, cognate=False)
            except (AttributeError, KeyError) as err:
                # Extract the missing key name from the exception text
                # (second quoted token) to report it to the user.
                field = re.match(r".*?'(.+?)'.+?'(.+?)'$", str(err)).group(2)
                logger.warning(
                    f"User-defined format specification in the json-file was missing the key {field}, "
                    f"falling back to default parser")
                EP = ExcelParser
        else:
            logger.warning(
                "User-defined format specification in the json-file was missing, falling back to default parser"
            )
            EP = ExcelParser
            # The Intermediate Storage, in a in-memory DB (unless specified otherwise)
        # add Status_Column if not existing
        if status_update:
            add_status_column_to_table(dataset=dataset, table_name="FormTable")
        # EP was a class up to here; instantiate it for this dataset.
        EP = EP(dataset, row_type=Concept)

        EP.db.empty_cache()

        # Only the active (first visible) sheet of the wordlist is parsed.
        lexicon_wb = openpyxl.load_workbook(lexicon).active
        EP.parse_cells(lexicon_wb, status_update=status_update)
        EP.db.write_dataset_from_cache()

    # load cognate dataset if provided by metadata
    if cognate_lexicon:
        if dialect:
            try:
                ECP = excel_parser_from_dialect(
                    dataset,
                    argparse.Namespace(**dialect.cognates),
                    cognate=True)
            except (AttributeError, KeyError) as err:
                # Same missing-key extraction as for the wordlist parser.
                field = re.match(r".*?'(.+?)'.+?'(.+?)'$", str(err)).group(2)
                logger.warning(
                    f"User-defined format specification in the json-file was missing the key {field}, "
                    f"falling back to default parser")
                ECP = ExcelCognateParser
        else:
            logger.warning(
                "User-defined format specification in the json-file was missing, falling back to default parser"
            )
            ECP = ExcelCognateParser
        # add Status_Column if not existing
        if status_update:
            add_status_column_to_table(dataset=dataset,
                                       table_name="CognateTable")
        ECP = ECP(dataset, row_type=CogSet)
        ECP.db.cache_dataset()
        # Unlike the wordlist, every worksheet of the cognate workbook
        # is parsed.
        for sheet in openpyxl.load_workbook(cognate_lexicon).worksheets:
            ECP.parse_cells(sheet, status_update=status_update)
        ECP.db.write_dataset_from_cache()
Esempio n. 30
0
    nbook = v3.reads_py(code)
    nbook = v4.upgrade(nbook)  # Upgrade v3 to v4
    jsonform = v4.writes(nbook) + "\n"

    with open(output, "w") as f:
        f.write(jsonform)


if __name__ == '__main__':
    # CLI: convert a python source file into a Jupyter notebook.
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument("input", help="input python file")
    cli_parser.add_argument("output", help="output notebook file")
    style_action = cli_parser.add_argument(
        "-c", "--cellmark-style", default="default",
        help="default|pycharm|spyder")
    opts = cli_parser.parse_args()

    allowed_styles = ("default", "pycharm", "spyder")
    if opts.cellmark_style not in allowed_styles:
        # Attribute the error to the -c/--cellmark-style action so the
        # message names the offending option.
        raise argparse.ArgumentError(
            style_action,
            "invalid value, can only be one of " + str(allowed_styles))

    if opts.cellmark_style == "default":
        py2ipynb_default(opts.input, opts.output)
    else:
        py2ipynb(opts.input, opts.output, opts.cellmark_style, [
            "# ----------------------------------------------------------------------------"
        ])