Example #1
File: Log.py Project: dtgit/dtedu
def LogCallStack(level, *args):
    """
    LogCallStack(level, *args) => View the whole call stack for the specified call
    """
    if LOG_LEVEL and level <= LOG_LEVEL:
        if level not in LOG_PROCESSOR:
            raise ValueError("Invalid log level: %s" % level)

        stack = " ".join(traceback.format_list(traceback.extract_stack()[:-1]))
        pr = "%8s %s:\n%s\n" % (
            LOG_LABEL[level],
            time.ctime(time.time()),
            stack
            )
        for data in args:
            try:
                # keep multi-line strings verbatim, pretty-print everything else
                if "\n" not in data:
                    data = pprint.pformat(data)
            except Exception:
                # "in" raises TypeError for non-string data; pretty-print it too
                data = pprint.pformat(data)
            pr = pr + data + " "

        LOG_PROCESSOR[level](level, LOG_LABEL[level], pr, )
Example #2
File: utils.py Project: Nextdoor/kingpin
def diff_dicts(dict1, dict2):
    """Compares two dicts and returns the difference as a string,
    if there is any.

    Sorts two dicts (including sorting of the lists!!) and then diffs them.
    This will ignore string types ('unicode' vs 'string').

    args:
        dict1: First dict
        dict2: Second dict

    returns:
        A diff string if there's any difference, otherwise None.
    """
    dict1 = order_dict(dict1)
    dict2 = order_dict(dict2)

    if dict1 == dict2:
        return

    dict1 = pprint.pformat(dict1).splitlines()
    dict2 = pprint.pformat(dict2).splitlines()

    # Remove unicode identifiers.
    dict1 = [line.replace("u'", "'") for line in dict1]
    dict2 = [line.replace("u'", "'") for line in dict2]

    return '\n'.join(difflib.unified_diff(dict1, dict2, n=2))
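What makes this work is that pprint.pformat sorts dict keys, so two structurally equal dicts always render to identical text. A minimal self-contained sketch of the same idea (illustrative names, not kingpin's API):

import difflib
import pprint

def dict_diff(d1, d2):
    # pformat sorts dict keys, so equal dicts produce identical lines
    a = pprint.pformat(d1).splitlines()
    b = pprint.pformat(d2).splitlines()
    return '\n'.join(difflib.unified_diff(a, b, n=2))

print(dict_diff({'a': 1, 'b': 2}, {'a': 1, 'b': 3}))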
Example #3
def pretty(value, width=80, nl_width=80, **kw):
    if isinstance(value, dict):
        return '{\n %s' % (pformat(value, 4, nl_width)[1:])
    elif isinstance(value, tuple):
        return '\n%s%s' % (' ' * 4, pformat(value, width=nl_width, **kw))
    else:
        return pformat(value, width=width, **kw)
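A quick usage sketch of the helper above (assumes 'from pprint import pformat' is in scope, as the snippet implies; the values are made up):

# dicts get their opening brace on its own line; tuples are pushed onto a new, indented line
print(pretty({'broker': 'amqp://localhost', 'workers': 4}))
print(pretty(('a', 'b', 'c')))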
Example #4
def Log(level, *args):
    """
    Log(level, *args) => Pretty-prints data on the console with additional information.
    """
    if LOG_LEVEL and level <= LOG_LEVEL:
        if level not in LOG_PROCESSOR:
            raise ValueError("Invalid log level: %s" % level)

        stack = ""
        stackItems = traceback.extract_stack()
        for depth in LOG_STACK_DEPTH:
            stackItem = stackItems[depth]
            stack = "%s%s:%s:" % (stack, os.path.basename(stackItem[0]), stackItem[1],)
##        return "%s(%s)"%( os.path.basename(stackItem[0]), stackItem[1] )
##        pr = "%s %s %010.02f %08d/%02d > " % (LOG_LABEL[level], time.ctime(time.time()), time.clock(), thread.get_ident(), threading.activeCount())
        pr = "%8s %s%s: " % (
            LOG_LABEL[level],
            stack,
            time.ctime(time.time()),
##            thread.get_ident(),
##            threading.activeCount()
            )
        for data in args:
            try:
                # keep multi-line strings verbatim, pretty-print everything else
                if "\n" not in data:
                    data = pprint.pformat(data)
            except Exception:
                # "in" raises TypeError for non-string data; pretty-print it too
                data = pprint.pformat(data)
            pr = pr + data + " "

        LOG_PROCESSOR[level](level, LOG_LABEL[level], pr, stackItems)
Example #5
    def post_dbe_update(cls, id, fq_name, obj_dict, db_conn, **kwargs):
        api_server = db_conn.get_api_server()
        db_conn.config_log('PortServer: post_dbe_update hit',
                           level=SandeshLevel.SYS_DEBUG)
        msg = "PORT-UPDATE: %s" % pformat(obj_dict)
        db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        msg = "PORT-UPDATE: %s" % pformat(fq_name)
        db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)
        msg = "PORT-UPDATE: %s" % pformat(kwargs)
        db_conn.config_log(msg, level=SandeshLevel.SYS_DEBUG)

        if obj_dict.get('bms_port_info') \
           and obj_dict.get('bms_port_info').get('local_link_connection'):
            ok, result = db_conn.dbe_read(
                obj_type='port',
                obj_id=obj_dict['uuid'],
                obj_fields=['physical_interface_back_refs'])
            msg = ("PORT-UPDATE BACK-REFS: %s", str(ok))
            if ok:
                msg = ("PORT-UPDATE BACK-REFS: %s", pformat(result))
                db_conn.config_log(str(msg), level=SandeshLevel.SYS_DEBUG)
                if result.get('physical_interface_back_refs'):
                    pi_uuid = result['physical_interface_back_refs'][0]['uuid']
                    port_fq_name = db_conn.uuid_to_fq_name(obj_dict['uuid'])
                    api_server.internal_request_ref_update(
                        'physical-interface',
                        pi_uuid, 'DELETE',
                        'port',
                        obj_dict['uuid'],
                        port_fq_name)

        cls._create_pi_ref(obj_dict, db_conn)

        return True, ''
Example #6
def _upsert_cname_record(client, zone_id, cname_domain, origin_domain):
    """Upsert a CNAME record of `cname_domain` that points to `origin_domain`.
    """
    change = {
        'Action': 'UPSERT',
        'ResourceRecordSet': {
            'Name': cname_domain,
            'Type': 'CNAME',
            'TTL': 900,
            'ResourceRecords': [
                {
                    'Value': origin_domain
                },
            ],
        }
    }
    change_batch = {
        'Comment': 'Upsert {0} -> {1}'.format(cname_domain, origin_domain),
        'Changes': [change]}
    log.info(pformat(change_batch))

    r = client.change_resource_record_sets(
        HostedZoneId=zone_id,
        ChangeBatch=change_batch)
    log.info(r)
    if r['ResponseMetadata']['HTTPStatusCode'] != 200:
        msg = 'change_resource_record_sets failed with:\n' + pformat(change)
        log.error(msg)
        raise Route53Error(msg)
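A hypothetical invocation of the helper above (assumes boto3 credentials with Route 53 permissions; the hosted zone ID and domain names are placeholders):

import boto3

client = boto3.client('route53')
_upsert_cname_record(client,
                     zone_id='Z0000000EXAMPLE',
                     cname_domain='www.example.com.',
                     origin_domain='origin.example.com.')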
Example #7
    def test_basic_line_wrap(self):
        # verify basic line-wrapping operation
        o = {'RPM_cal': 0,
             'RPM_cal2': 48059,
             'Speed_cal': 0,
             'controldesk_runtime_us': 0,
             'main_code_runtime_us': 0,
             'read_io_runtime_us': 0,
             'write_io_runtime_us': 43690}
        exp = """\
{'RPM_cal': 0,
 'RPM_cal2': 48059,
 'Speed_cal': 0,
 'controldesk_runtime_us': 0,
 'main_code_runtime_us': 0,
 'read_io_runtime_us': 0,
 'write_io_runtime_us': 43690}"""
        for type in [dict, dict2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        o = range(100)
        exp = '[%s]' % ',\n '.join(map(str, o))
        for type in [list, list2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        o = tuple(range(100))
        exp = '(%s)' % ',\n '.join(map(str, o))
        for type in [tuple, tuple2]:
            self.assertEqual(pprint.pformat(type(o)), exp)

        # indent parameter
        o = range(100)
        exp = '[   %s]' % ',\n    '.join(map(str, o))
        for type in [list, list2]:
            self.assertEqual(pprint.pformat(type(o), indent=4), exp)
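The expected strings in this test follow directly from pformat's wrapping rules: dict keys are sorted, items go one per line once the one-line repr exceeds width, and indent widens the leading whitespace. A quick check (outputs shown as comments):

import pprint

pprint.pformat({'b': 1, 'a': 2}, width=10)
# "{'a': 2,\n 'b': 1}"

pprint.pformat(list(range(5)), width=10, indent=4)
# '[   0,\n    1,\n    2,\n    3,\n    4]'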
Example #8
File: command.py Project: kouk/flower
    def print_banner(self, ssl):
        logger.info("Visit me at http%s://%s:%s", 's' if ssl else '',
                    options.address or 'localhost', options.port)
        logger.info('Broker: %s', self.app.connection().as_uri())
        logger.info('Registered tasks: \n%s',
                    pformat(sorted(self.app.tasks.keys())))
        logger.debug('Settings: %s', pformat(settings))
Example #9
    def testSaveAccountSettings(self):
        url = reverse('self-settings')

        response = self.client.get(url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        expect = response.data

        test_params = (
            {'email_rsvp_updates': False},
            {'email_rsvp_updates': True},
            {'email_social_activity': False},
            {'email_promotions': False},
            {'text_rsvp_updates': False},
            {'text_rsvp_updates': True},
            {'text_social_activity': True},
            {'text_promotions': True},
            {'default_event_privacy': EventPrivacy.PRIVATE},
            {'default_event_privacy': EventPrivacy.PUBLIC},
            {'profile_privacy': Account.PRIVATE},
            {'profile_privacy': Account.PUBLIC},
            {'email_rsvp_updates': False, 'email_social_activity': False,
             'profile_privacy': Account.PRIVATE},
        )

        for params in test_params:
            expect.update(params)
            response = self.client.patch(url, params)

            self.assertEqual(response.status_code, status.HTTP_200_OK, response.data)
            self.assertEqual(expect, response.data,
                             "\nExpect: \n{}\nResponse: \n{}\nparams: {}".format(pformat(expect),
                                                                                 pformat(response.data),
                                                                                 pformat(params)))
Example #10
  def test_get_hook_base_dir(self, provide_directory_mock):
    fileCache = FileCache(self.config)
    # Check missing parameter
    command = {
      'commandParams' : {
      }
    }
    base = fileCache.get_hook_base_dir(command, "server_url_pref")
    self.assertEqual(base, None)
    self.assertFalse(provide_directory_mock.called)

    # Check existing dir case
    command = {
      'commandParams' : {
        'hooks_folder' : os.path.join('HDP', '2.1.1', 'hooks')
      }
    }
    provide_directory_mock.return_value = "dummy value"
    fileCache = FileCache(self.config)
    res = fileCache.get_hook_base_dir(command, "server_url_pref")
    self.assertEqual(
      pprint.pformat(provide_directory_mock.call_args_list[0][0]),
      "('/var/lib/ambari-agent/cache', "
      "{0}, "
      "'server_url_pref')".format(pprint.pformat(os.path.join('stacks','HDP', '2.1.1', 'hooks'))))
    self.assertEqual(res, "dummy value")
Example #11
File: cli.py Project: vivisect/vivisect
    def do_filemeta(self, line):

        '''
        Show/List file metadata.

        Usage: filemeta [ fname [ keyname ] ]

        Example: filemeta kernel32
        Example: filemeta kernel32 md5
        '''

        argv = e_cli.splitargs(line)
        if len(argv) == 0:

            self.vprint('Loaded Files:')
            for fname in self.getFiles():
                self.vprint('    %s' % fname)

        elif len(argv) == 1:
            d = self.getFileMetaDict(argv[0])
            self.vprint(pprint.pformat(d))

        elif len(argv) == 2:
            val = self.getFileMeta(argv[0], argv[1])
            self.vprint('%s (%s):' % (argv[1], argv[0]))
            self.vprint(pprint.pformat(val))

        else:
            self.do_help('filemeta')
Example #12
File: base.py Project: alihalabyah/inbox
def commit_uids(db_session, new_uids):
    try:
        msg = u"count: {}".format(len(new_uids))
        log.info("Commit new UIDs", message=msg,
                 new_committed_message_count=len(new_uids))
        db_session.add_all(new_uids)
        db_session.commit()
    except DataError as e:
        db_session.rollback()
        log.error("Issue inserting new UIDs into database. "
                  "This probably means that an object's property is "
                  "malformed or way too long, etc.")

        for uid in new_uids:
            log.error(uid)
            import inspect
            from pprint import pformat
            log.error(inspect.getmembers(uid))
            try:
                log.error(pformat(uid.__dict__, indent=2))
            except AttributeError:
                pass

            for part in uid.message.parts:
                log.error(inspect.getmembers(part))
                try:
                    log.error(pformat(part.__dict__, indent=2))
                except AttributeError:
                    pass

        raise e
Example #13
File: text.py Project: DHLabs/keep_isn
def pretty(value, width=80, nl_width=80, sep='\n', **kw):
    if isinstance(value, dict):
        return '{%s %s' % (sep, pformat(value, 4, nl_width)[1:])
    elif isinstance(value, tuple):
        return '%s%s%s' % (sep, ' ' * 4, pformat(value, width=nl_width, **kw))
    else:
        return pformat(value, width=width, **kw)
Example #14
    def parse(self, xmltext):
        "parse a METS document"

        mets = libmets.CreateFromDocument(xmltext)
        self.logger.debug(pprint.pformat('METS document: '
                                         + (mets.toxml("utf-8",
                                                       element_name='mets'))))
        groups = self._parseFileGrpType(mets.fileSec.fileGrp)
        collMaps = []
        for smap in mets.structMap:
            name = smap.div.LABEL
            if smap.div.LABEL is None:
                name = ''
            collectionObjs = self._parseDivType(smap.div.div, groups)

            filePaths = []
            for fptr_element in smap.div.fptr:
                self.logger.debug('FPTR: ' + fptr_element.FILEID)
                pathList = self._pathListExtractor(fptr_element.FILEID,
                                                   groups)
                filePaths += pathList

            collectionMap = {'name': name,
                             'type': smap.div.TYPE,
                             'filePaths': filePaths,
                             'nestedObjects': collectionObjs}
            self.logger.debug('Structural map: '
                              + pprint.pformat(collectionMap))
            collMaps.append(collectionMap)

        return collMaps
Example #15
File: config.py Project: SCOAP3/invenio
def update(filename='invenio.cfg', silent=True):
    """Update new config.py from conf options.

    The previous config.py is kept in a backup copy.
    """
    d = get_instance_config_object(filename, silent)
    new_config = StringIO()
    keys = set(d.__dict__.keys()) | set(default_keys())
    keys = list(keys)
    keys.sort()
    for key in keys:
        if key != key.upper():
            continue
        value = d.__dict__.get(key, current_app.config[key])
        type_ = type(value)
        prmt = key + ' (' + type_.__name__ + ') [' + pformat(value) + ']: '

        new_value = raw_input(prmt)
        try:
            new_value = ast.literal_eval(new_value)
        except (SyntaxError, ValueError):
            pass

        print('>>>', key, '=', pformat(new_value))
        print(key, '=', pformat(new_value), file=new_config)

    with current_app.open_instance_resource(filename, 'w') as config_file:
        config_file.write(new_config.getvalue())
Example #16
File: debug.py Project: zerok/django-old
    def get_request_repr(self, request):
        if request is None:
            return repr(None)
        else:
            # Since this is called as part of error handling, we need to be very
            # robust against potentially malformed input.
            try:
                get = pformat(request.GET)
            except:
                get = '<could not parse>'
            if request._post_parse_error:
                post = '<could not parse>'
            else:
                try:
                    post = pformat(self.get_post_parameters(request))
                except:
                    post = '<could not parse>'
            try:
                cookies = pformat(request.COOKIES)
            except:
                cookies = '<could not parse>'
            try:
                meta = pformat(request.META)
            except:
                meta = '<could not parse>'
            return smart_str(u'<%s\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                             (request.__class__.__name__,
                              request.path,
                              unicode(get),
                              unicode(post),
                              unicode(cookies),
                              unicode(meta)))
Example #17
    def stats(self):
        print "```"
        stats = self.uart.interface_stats()
        print "```"
        print "```javascript"
        print pformat(stats)
        print "```"
Example #18
def log_data(expected, actual):
    """Log list values."""
    for index, (evalue, avalue) in enumerate(zip(expected, actual)):
        logging.debug("\n{i} expected:\n{e}\n{i} actual:\n{a}".format(
            i=index,
            e=pprint.pformat(evalue),
            a=pprint.pformat(avalue)))
Example #19
    def __unicode__(self):
        essential_for_verbose = (
            self.validator, self.validator_value, self.instance, self.schema,
        )
        if any(m is _unset for m in essential_for_verbose):
            return self.message

        pschema = pprint.pformat(self.schema, width=72)
        pinstance = pprint.pformat(self.instance, width=72)
        return self.message + textwrap.dedent("""

            Failed validating %r in %s%s:
            %s

            On %s%s:
            %s
            """.rstrip()
        ) % (
            self.validator,
            self._word_for_schema_in_error_message,
            _utils.format_as_index(list(self.relative_schema_path)[:-1]),
            _utils.indent(pschema),
            self._word_for_instance_in_error_message,
            _utils.format_as_index(self.relative_path),
            _utils.indent(pinstance),
        )
Example #20
    def test_04_package_to_api1_with_relationship(self):

        context = {"model": model,
                 "session": model.Session}

        create = CreateTestData

        create.create_family_test_data()
        pkg = model.Session.query(model.Package).filter_by(name='homer').one()

        as_dict = pkg.as_dict()
        as_dict['license_title'] = None
        as_dict['num_tags'] = 0
        as_dict['num_resources'] = 0
        dictize = package_to_api1(pkg, context)

        as_dict["relationships"].sort(key=lambda x:x.items())
        dictize["relationships"].sort(key=lambda x:x.items())

        # the as_dict method doesn't care about organizations
        del dictize['organization']
        as_dict_string = pformat(as_dict)
        dictize_string = pformat(dictize)
        print(as_dict_string)
        print(dictize_string)

        assert as_dict == dictize, "\n".join(unified_diff(as_dict_string.split("\n"), dictize_string.split("\n")))
Example #21
    def test_08_package_save(self):

        context = {"model": model,
                   "user": '******',
                   "session": model.Session}

        anna1 = model.Session.query(model.Package).filter_by(name='annakarenina').one()

        anna_dictized = self.remove_changable_columns(package_dictize(anna1, context))

        anna_dictized["name"] = u'annakarenina3'

        model.repo.new_revision()
        package_dict_save(anna_dictized, context)
        model.Session.commit()

        # Re-clean anna_dictized
        anna_dictized = self.remove_changable_columns(anna_dictized)

        pkg = model.Session.query(model.Package).filter_by(name='annakarenina3').one()

        package_dictized = self.remove_changable_columns(package_dictize(pkg, context))

        anna_original = pformat(anna_dictized)
        anna_after_save = pformat(package_dictized)

        assert package_dictized == anna_dictized,\
            "\n".join(unified_diff(anna_original.split("\n"), anna_after_save.split("\n")))
Example #22
    def __repr__(self):
        # Since this is called as part of error handling, we need to be very
        # robust against potentially malformed input.
        try:
            get = pformat(self.GET)
        except:
            get = '<could not parse>'
        if self._post_parse_error:
            post = '<could not parse>'
        else:
            try:
                post = pformat(self.POST)
            except:
                post = '<could not parse>'
        try:
            cookies = pformat(self.COOKIES)
        except:
            cookies = '<could not parse>'
        try:
            meta = pformat(self.META)
        except:
            meta = '<could not parse>'
        return smart_str(u'<ModPythonRequest\npath:%s,\nGET:%s,\nPOST:%s,\nCOOKIES:%s,\nMETA:%s>' %
                         (self.path, unicode(get), unicode(post),
                          unicode(cookies), unicode(meta)))
Example #23
def update_project_acl(project_doc):
    '''Convert the old dict-style ACL to a list of ALLOW ACEs. Also move the
    security,tool,delete perms to 'admin'
    '''
    if not isinstance(project_doc['acl'], dict):
        log.warning('Project %s is already updated', project_doc['shortname'])
        return
    perm_map = dict(
        read='read',
        create='create',
        update='update',
        security='admin',
        tool='admin',
        delete='admin')
    new_acl = []
    for perm, role_ids in sorted(project_doc['acl'].iteritems()):
        perm = perm_map[perm]
        for rid in role_ids:
            if c_project_role.find(dict(_id=rid)).count() == 0:
                continue
            _grant(new_acl, perm, rid)
    if options.test:
        log.info('--- update %s\n%s\n%s\n---',
                 project_doc['shortname'],
                 pformat(_format_acd(project_doc['acl'])),
                 pformat(map(_format_ace, new_acl)))
    project_doc['acl'] = new_acl
Example #24
    def http_post_collection(cls, tenant_name, obj_dict, db_conn):
        try:
            fq_name = obj_dict['fq_name']
            proj_uuid = db_conn.fq_name_to_uuid('project', fq_name[0:2])
        except cfgm_common.exceptions.NoIdError:
            return (False, (500, 'No Project ID error : ' + pformat(fq_name)))

        (ok, proj_dict) = QuotaHelper.get_project_dict(proj_uuid, db_conn)
        if not ok:
            return (False, (500, 'Internal error : ' + pformat(proj_dict)))

        obj_type = 'network-policy'
        if 'network-policys' in proj_dict:
            quota_count = len(proj_dict['network-policys'])
            if obj_dict['id_perms'].get('user_visible', True) is not False:
                (ok, quota_limit) = QuotaHelper.check_quota_limit(proj_dict, obj_type,
                                                                  quota_count)
                if not ok:
                    return (False, (403, pformat(obj_dict['fq_name']) + ' : ' + quota_limit))

        _check_policy_rule_uuid(obj_dict.get('network_policy_entries'))
        try:
            cls._check_policy(obj_dict)
        except Exception as e:
            return (False, (500, str(e)))

        return True, ""
Example #25
File: ldapobject.py Project: tiran/pyldap
  def _ldap_call(self,func,*args,**kwargs):
    """
    Wrapper method mainly for serializing calls into OpenLDAP libs
    and trace logs
    """
    self._ldap_object_lock.acquire()
    if __debug__:
      if self._trace_level>=1:
        self._trace_file.write('*** %s %s - %s\n%s\n' % (
          repr(self),
          self._uri,
          '.'.join((self.__class__.__name__,func.__name__)),
          pprint.pformat((args,kwargs))
        ))
        if self._trace_level>=9:
          traceback.print_stack(limit=self._trace_stack_limit,file=self._trace_file)
    diagnostic_message_success = None
    try:
      try:
        result = func(*args,**kwargs)
        if __debug__ and self._trace_level>=2:
          if func.__name__!="unbind_ext":
            diagnostic_message_success = self._l.get_option(ldap.OPT_DIAGNOSTIC_MESSAGE)
      finally:
        self._ldap_object_lock.release()
    except LDAPError as e:
      if __debug__ and self._trace_level>=2:
        self._trace_file.write('=> LDAPError - %s: %s\n' % (e.__class__.__name__,str(e)))
      raise
    else:
      if __debug__ and self._trace_level>=2:
        if diagnostic_message_success is not None:
          self._trace_file.write('=> diagnosticMessage: %s\n' % (repr(diagnostic_message_success)))
        self._trace_file.write('=> result:\n%s\n' % (pprint.pformat(result)))
    return result
Example #26
    def http_post_collection(cls, tenant_name, obj_dict, db_conn):
        proj_dict = obj_dict['project_refs'][0]
        if 'uuid' in proj_dict:
            proj_uuid = proj_dict['uuid']
        else:
            proj_uuid = db_conn.fq_name_to_uuid('project', proj_dict['to'])
        (ok, proj_dict) = QuotaHelper.get_project_dict(proj_uuid, db_conn)
        if not ok:
            return (False, (500, 'Internal error : ' + pformat(proj_dict)))

        obj_type = 'floating-ip'

        if 'floating_ip_back_refs' in proj_dict:
            quota_count = len(proj_dict['floating_ip_back_refs'])
            if obj_dict['id_perms'].get('user_visible', True) is not False:
                (ok, quota_limit) = QuotaHelper.check_quota_limit(proj_dict, obj_type,
                                                                  quota_count)
                if not ok:
                    return (False, (403, pformat(obj_dict['fq_name']) + ' : ' + quota_limit))

        vn_fq_name = obj_dict['fq_name'][:-2]
        req_ip = obj_dict.get("floating_ip_address")
        if req_ip and cls.addr_mgmt.is_ip_allocated(req_ip, vn_fq_name):
            return (False, (403, 'Ip address already in use'))
        try:
            fip_addr = cls.addr_mgmt.ip_alloc_req(vn_fq_name,
                                                  asked_ip_addr=req_ip)
        except Exception as e:
            return (False, (500, str(e)))
        obj_dict['floating_ip_address'] = fip_addr
        db_conn.config_log('AddrMgmt: alloc %s FIP for vn=%s, tenant=%s, askip=%s' \
            % (obj_dict['floating_ip_address'], vn_fq_name, tenant_name,
               req_ip), level=SandeshLevel.SYS_DEBUG)
        return True, ""
Example #27
    def http_put(cls, id, fq_name, obj_dict, db_conn):
        (ok, sec_dict) = db_conn.dbe_read('security-group', {'uuid': id})
        if not ok:
            return (False, (500, 'Bad Security Group error : ' + pformat(sec_dict)))
        (ok, proj_dict) = QuotaHelper.get_project_dict(sec_dict['parent_uuid'], db_conn)
        if not ok:
            return (False, (500, 'Bad Project error : ' + pformat(proj_dict)))

        if 'security_group_entries' in obj_dict:
            rule_count = len(obj_dict['security_group_entries']['policy_rule'])
            obj_type = 'security-group-rule'
            for sg in proj_dict.get('security_groups', []):
                if sg['uuid'] == sec_dict['uuid']:
                    continue
                ok, sg_dict = db_conn.dbe_read('security-group', sg)
                if not ok:
                    continue
                sge = sg_dict.get('security_group_entries', {})
                rule_count += len(sge.get('policy_rule', []))

            if sec_dict['id_perms'].get('user_visible', True) is not False:
                (ok, quota_limit) = QuotaHelper.check_quota_limit(proj_dict, obj_type,
                                                                  rule_count-1)
                if not ok:
                    return (False, (403, pformat(fq_name) + ' : ' + quota_limit))

        _check_policy_rule_uuid(obj_dict.get('security_group_entries'))
        return True, ""
Example #28
File: idp.py Project: SUNET/eduid-IdP
    def _lookup_sso_session2(self) -> Optional[eduid_idp.sso_session.SSOSession]:
        """
        See if an SSO session exists for this request, and return the data about
        the currently logged in user from the session store.

        :return: Data about currently logged in user
        """
        _data = None
        _session_id = eduid_idp.mischttp.get_idpauthn_cookie(self.logger)
        if _session_id:
            _data = self.context.sso_sessions.get_session(eduid_idp.cache.SSOSessionId(_session_id))
            self.logger.debug("Looked up SSO session using idpauthn cookie :\n{!s}".format(_data))
        else:
            query = eduid_idp.mischttp.parse_query_string(self.logger)
            if query:
                self.logger.debug("Parsed query string :\n{!s}".format(pprint.pformat(query)))
                try:
                    _data = self.context.sso_sessions.get_session(query['id'])
                    self.logger.debug("Looked up SSO session using query 'id' parameter :\n{!s}".format(
                        pprint.pformat(_data)))
                except KeyError:
                    # no 'id', or not found in cache
                    pass
        if not _data:
            self.logger.debug("SSO session not found using 'id' parameter or 'idpauthn' cookie")
            return None
        _sso = eduid_idp.sso_session.from_dict(_data)
        self.logger.debug("Re-created SSO session {!r}".format(_sso))
        return _sso
Example #29
    def http_post_collection(cls, tenant_name, obj_dict, db_conn):
        try:
            fq_name = obj_dict['fq_name']
            proj_uuid = db_conn.fq_name_to_uuid('project', fq_name[0:2])
        except cfgm_common.exceptions.NoIdError:
            return (False, (500, 'No Project ID error : ' + pformat(fq_name)))

        (ok, proj_dict) = QuotaHelper.get_project_dict(proj_uuid, db_conn)
        if not ok:
            return (False, (500, 'Internal error : ' + pformat(proj_dict)))

        obj_type = 'virtual-network'
        if 'virtual_networks' in proj_dict:
            quota_count = len(proj_dict['virtual_networks'])
            if obj_dict['id_perms'].get('user_visible', True) is not False:
                (ok, quota_limit) = QuotaHelper.check_quota_limit(proj_dict, obj_type,
                                                                  quota_count)
                if not ok:
                    return (False, (403, pformat(obj_dict['fq_name']) + ' : ' + quota_limit))

        db_conn.update_subnet_uuid(obj_dict)

        (ok, error) =  cls._check_route_targets(obj_dict, db_conn)
        if not ok:
            return (False, (400, error))
        try:
            cls.addr_mgmt.net_create_req(obj_dict)
        except Exception as e:
            return (False, (500, str(e)))

        return True, ""
Example #30
def _CompareSamples(a, b, context=True, numlines=1):
  """Generate an HTML table showing differences between 'a' and 'b'.

  Args:
    a: dict, as output by PerfKitBenchmarker.
    b: dict, as output by PerfKitBenchmarker.
    context: boolean. Show context in diff? If False, all lines are output, even
      those which are equal.
    numlines: int. Passed to difflib.Htmldiff.make_table.
  Returns:
    string or None. An HTML table, or None if there are no differences.
  """
  a = a.copy()
  b = b.copy()
  a['metadata'] = _SplitLabels(a.pop('labels', ''))
  b['metadata'] = _SplitLabels(b.pop('labels', ''))

  # Prune the keys in VARYING_KEYS prior to comparison to make the diff more
  # informative.
  for d in (a, b):
    for key in VARYING_KEYS:
      d.pop(key, None)

  astr = pprint.pformat(a).splitlines()
  bstr = pprint.pformat(b).splitlines()
  if astr == bstr and context:
    return None

  differ = difflib.HtmlDiff()
  return differ.make_table(astr, bstr, context=context, numlines=numlines)
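Stripped of the PerfKitBenchmarker specifics, the core pattern here is: pformat each dict, split the text into lines, and hand both line lists to difflib.HtmlDiff. A minimal sketch:

import difflib
import pprint

a = pprint.pformat({'metric': 'latency_ms', 'value': 10}).splitlines()
b = pprint.pformat({'metric': 'latency_ms', 'value': 12}).splitlines()
html_table = difflib.HtmlDiff().make_table(a, b, context=True, numlines=1)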
Example #31
        if not info:  # section not present (None or [])
            # Note: this also excludes existing sections without info..
            continue

        if opt_verbose:
            sys.stdout.write(tty_green + tty_bold + secname + " " + tty_normal)
            sys.stdout.flush()

        plugin["inv_function"](info)

    # Remove empty paths
    inv_cleanup_tree(g_inv_tree)

    if inventory_pprint_output:
        import pprint
        r = pprint.pformat(g_inv_tree)
    else:
        r = repr(g_inv_tree)

    path = inventory_output_dir + "/" + hostname
    if g_inv_tree:
        file(path, "w").write(r + "\n")
        gzip.open(path + ".gz", "w").write(r + "\n")

    else:
        if os.path.exists(
                path
        ):  # Remove empty inventory files. Important for host inventory icon
            os.remove(path)
        if os.path.exists(path + ".gz"):
            os.remove(path + ".gz")
Example #32
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
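This to_str is the pattern swagger-codegen-style API clients generate for their model classes, usually paired with a __repr__ that delegates to it. A self-contained sketch of the full pattern (the Pet model is illustrative, not from any particular client):

import pprint

class Pet:
    def __init__(self, name, tags):
        self.name = name
        self.tags = tags

    def to_dict(self):
        # generated clients build this from their declared attribute map
        return {'name': self.name, 'tags': self.tags}

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

print(Pet('rex', ['dog', 'large']))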
Example #33
def show_section(section):
    out = [""]

    depth = '\t' * (section['depth'] + 1)
    classification = "{} :: ".format(
        section['classification']) if section['classification'] else ""
    title = section['title_text'].replace('\n', '')

    if section['heuristic'] is not None:
        score = section['heuristic']['score']
        out.append("\t{}{}[{}] {}".format(depth, classification, score, title))

    else:
        out.append("\t{}{}{}".format(depth, classification, title))

    if section['body']:
        if section['body_format'] in ["TEXT", "MEMORY_DUMP"]:
            out.extend([
                "\t\t{}{}".format(depth, x)
                for x in section['body'].splitlines()
            ])
        elif section['body_format'] == 'GRAPH_DATA':
            try:
                body = json.loads(section['body'])
            except TypeError:
                body = section['body']
            dom_min, dom_max = body['data']['domain']
            values = body['data']['values']
            step = (dom_max - dom_min) / 5.0
            cmap = []

            for v in values:
                if v < dom_min + step:
                    cmap.append(" ")
                elif v < dom_min + (step * 2):
                    cmap.append(L1)
                elif v < dom_min + (step * 3):
                    cmap.append(L2)
                elif v < dom_min + (step * 4):
                    cmap.append(L3)
                else:
                    cmap.append(L4)

            out.append("\t\t{} [ ]: {} .. {}        [{}]: {} .. {}".format(
                depth, dom_min * 1.0, dom_min + step * 1.0, L4,
                dom_max - step * 1.0, dom_max * 1.0))
            out.append("\t\t{}{}{}{}".format(depth, LD, ''.join(cmap), RD))
        elif section['body_format'] == 'URL':
            try:
                body = json.loads(section['body'])
            except TypeError:
                body = section['body']
            if not isinstance(body, list):
                body = [body]
            for url in body:
                if 'name' in url:
                    out.append("\t\t{}{}: {}".format(depth, url['name'],
                                                     url['url']))
                else:
                    out.append("\t\t{}{}".format(depth, url['url']))
        elif section['body_format'] == 'JSON':
            try:
                body = pprint.pformat(json.loads(section['body']))
            except TypeError:
                body = pprint.pformat(section['body'])
            out.extend(
                ["\t\t{}{}".format(depth, x) for x in body.splitlines()])
        elif section['body_format'] == 'KEY_VALUE':
            try:
                body = json.loads(section['body'])
            except TypeError:
                body = section['body']
            out.extend(
                ["\t\t{}{}: {}".format(depth, k, v) for k, v in body.items()])
        else:
            out.append("Unknown section type: {}".format(
                section['body_format']))

    heuristic = section.get('heuristic')
    spacer = True
    if heuristic:
        spacer = False
        out.append('')
        out.append("\t\t\t{}[HEURISTIC] {}".format(depth, heuristic['name']))
        for signature in heuristic.get('signature', []):
            out.append("\t\t\t{}[SIGNATURE] {} ({}x)".format(
                depth, signature['name'], signature['frequency']))
        for attack in heuristic.get('attack', []):
            out.append("\t\t\t{}[ATTACK] {} ({})".format(
                depth, attack['pattern'], attack['attack_id']))

    if section['tags']:
        if spacer:
            out.append('')
        for tag in section['tags']:
            out.append("\t\t\t{}[{}] {}".format(depth,
                                                tag['short_type'].upper(),
                                                tag['value']))

    return out
Example #34
    def __str__(self):
        return pprint.pformat(self.instance._conf, indent=4)
Example #35
                # BGR->RGB
                img = cv2.imread(
                    os.path.join(cfg.DATASET.DATA_DIR,
                                 img_names[i]))[..., ::-1]
                log_writer.add_image("Images/{}".format(img_names[i]),
                                     img,
                                     epoch,
                                     dataformats='HWC')
                #add ground truth (label) images
                if grt is not None:
                    log_writer.add_image("Label/{}".format(img_names[i]),
                                         grt[..., ::-1],
                                         epoch,
                                         dataformats='HWC')

        # If in local_test mode, only visualize 5 images just for testing
        # procedure
        if local_test and img_cnt >= 5:
            break


if __name__ == '__main__':
    args = parse_args()
    if args.cfg_file is not None:
        cfg.update_from_file(args.cfg_file)
    if args.opts is not None:
        cfg.update_from_list(args.opts)
    cfg.check_and_infer()
    print(pprint.pformat(cfg))
    visualize(cfg, **args.__dict__)
Example #36

d = {'a': 1, 'b': 2, 'c': 3}

s_d = str(d)

print(s_d)
# {'a': 1, 'b': 2, 'c': 3}

print(type(s_d))
# <class 'str'>

import pprint

dl = {'a': 1, 'b': 2, 'c': [100, 200, 300]}

s_dl = str(dl)
print(s_dl)
# {'a': 1, 'b': 2, 'c': [100, 200, 300]}

p_dl = pprint.pformat(dl, width=10)
print(p_dl)
# {'a': 1,
#  'b': 2,
#  'c': [100,
#        200,
#        300]}

print(type(p_dl))
# <class 'str'>
Example #37
import ast
import glob
import os
import pprint
import traceback

import parsercode.utils as utils
import parsercode.parsefile as parsefile

if __name__ == '__main__':
    utils.Log('Starting\n')
    try:
        utils.SetWorkingDirectory(__file__)
        land_mapping = utils.LoadLandMapping()
        print('Initial entries:', len(land_mapping))
        # Process both the UTC_Logs and the "parsed" directories. Need everything.
        file_list = glob.glob(
            os.path.join('UTC_logs', 'parsed', 'UTC_log*.log'))
        for filename in file_list:
            parsefile.GetCardDefinitions(filename, land_mapping)
        file_list = glob.glob(os.path.join('UTC_logs', 'UTC_log*.log'))
        for filename in file_list:
            parsefile.GetCardDefinitions(filename, land_mapping)
        s = pprint.pformat(land_mapping)
        f = open('land_mapping.txt', 'w')
        f.write(s)
        f.close()
        print(s)
        print('number of entries:', len(land_mapping))

    except Exception as ex:
        utils.Log(traceback.format_exc() + '\n')
        raise
Example #38
import openpyxl, pprint, os


print('Opening workbook...')

wb = openpyxl.load_workbook(os.path.join(os.getcwd(), 'censuspopdata.xlsx'))
sheet = wb.get_sheet_by_name('Population by Census Tract')
print(type(sheet))
countyData = {}

print('Reading rows...')
# sheet.get_highest_row()
for row in range(2, int(sheet.max_row) + 1):
    state = sheet['B' + str(row)].value
    county = sheet['C' + str(row)].value
    pop = sheet['D' + str(row)].value

    countyData.setdefault(state, {})
    countyData[state].setdefault(county, {'tracts': 0, 'pop': 0})
    countyData[state][county]['tracts'] += 1
    countyData[state][county]['pop'] += int(pop)

print('Writing results...')
resultFile = open('census2010.py', 'w')
resultFile.write('allData = ' + pprint.pformat(countyData))
resultFile.close()
print('Done.')
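Writing 'allData = ' + pprint.pformat(countyData) works as a crude serializer because, for plain dicts of strings and numbers, pformat's output is a valid Python literal. A round-trip sketch (hypothetical data) using ast.literal_eval:

import ast
import pprint

data = {'AK': {'Anchorage': {'pop': 291826, 'tracts': 55}}}
text = pprint.pformat(data)
assert ast.literal_eval(text) == data  # the pretty-printed text parses back to the same dict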

Example #39
            args.append(str(config['count']))
        if 'interval' in config:
            args.append('-i')
            args.append(str(config['interval']))
        if 'endIntervals' in config:
            args.append('-ei')
            args.append(str(config['endIntervals']))
        if 'begin' in config:
            args.append('-b')
            args.append(str(config['begin']))
        if 'end' in config:
            args.append('-e')
            args.append(str(config['end']))
        if 'begin' not in config and 'end' not in config and 'endIntervals' not in config:
            args.append('-r')
        

        import pprint
        logger.debug('args: %s' % pprint.pformat(args))
        logger.debug('command: %s' % ' '.join(args))

        p = subprocess.Popen(args, cwd=os.path.join(os.environ['SPLUNK_HOME'], 'etc', 'apps', 'splunk_app_gogen'),
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=False)

        sys.stdout.write("<stream>\n")

        while True:
            data = p.stdout.readline()
            # logger.debug("data: %s" % data)
            sys.stdout.write(data)
Example #40
File: zrxv2.py Project: nfeignon/pymaker
    def __repr__(self):
        return pformat(vars(self))
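pformat(vars(self)) is a handy one-liner for debug reprs: vars returns the instance's __dict__, so every attribute shows up without maintaining a field list. A tiny sketch (the Order class is hypothetical):

from pprint import pformat

class Order:
    def __init__(self, maker, amount):
        self.maker = maker
        self.amount = amount

    def __repr__(self):
        return pformat(vars(self))

print(Order('0xdeadbeef', 100))  # -> {'amount': 100, 'maker': '0xdeadbeef'}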
Example #41
    def __str__(self):
        return pformat(self.nodelist)
Example #42
    def to_str(self):
        """
        Return model properties str
        """
        return pformat(self.to_dict())
Example #43
    def convert(scope, operator, container):
        "convert method"
        dtype = guess_numpy_type(operator.inputs[0].type)
        if dtype != numpy.float64:
            dtype = numpy.float32
        xgb_node = operator.raw_operator
        inputs = operator.inputs

        objective, base_score, js_trees = XGBConverter.common_members(
            xgb_node, inputs)
        if base_score is None:
            raise RuntimeError(  # pragma: no cover
                "base_score cannot be None")
        params = XGBConverter.get_xgb_params(xgb_node)

        attr_pairs = XGBClassifierConverter._get_default_tree_attribute_pairs()
        XGBConverter.fill_tree_attributes(js_trees, attr_pairs,
                                          [1 for _ in js_trees], True)

        ncl = (max(attr_pairs['class_treeids']) + 1) // params['n_estimators']

        bst = xgb_node.get_booster()
        best_ntree_limit = getattr(bst, 'best_ntree_limit',
                                   len(js_trees)) * ncl
        if best_ntree_limit < len(js_trees):
            js_trees = js_trees[:best_ntree_limit]
            attr_pairs = XGBClassifierConverter._get_default_tree_attribute_pairs(
            )
            XGBConverter.fill_tree_attributes(js_trees, attr_pairs,
                                              [1 for _ in js_trees], True)

        if len(attr_pairs['class_treeids']) == 0:
            raise RuntimeError(  # pragma: no cover
                "XGBoost model is empty.")
        if 'n_estimators' not in params:
            raise RuntimeError(  # pragma: no cover
                "Parameters not found, existing:\n{}".format(pformat(params)))
        if ncl <= 1:
            ncl = 2
            # See https://github.com/dmlc/xgboost/blob/master/src/common/math.h#L23.
            attr_pairs['post_transform'] = "LOGISTIC"
            attr_pairs['class_ids'] = [0 for v in attr_pairs['class_treeids']]
        else:
            # See https://github.com/dmlc/xgboost/blob/master/src/common/math.h#L35.
            attr_pairs['post_transform'] = "SOFTMAX"
            # attr_pairs['base_values'] = [base_score for n in range(ncl)]
            attr_pairs['class_ids'] = [
                v % ncl for v in attr_pairs['class_treeids']
            ]

        classes = xgb_node.classes_
        if (numpy.issubdtype(classes.dtype, numpy.floating)
                or numpy.issubdtype(classes.dtype, numpy.signedinteger)):
            attr_pairs['classlabels_int64s'] = classes.astype('int')
        else:
            classes = numpy.array([s.encode('utf-8') for s in classes])
            attr_pairs['classlabels_strings'] = classes

        if dtype == numpy.float64:
            op_name = "TreeEnsembleClassifierDouble"
        else:
            op_name = "TreeEnsembleClassifier"

        # add nodes
        if objective == "binary:logistic":
            ncl = 2
            container.add_node(op_name,
                               operator.input_full_names,
                               operator.output_full_names,
                               name=scope.get_unique_operator_name(op_name),
                               op_domain='ai.onnx.ml',
                               **attr_pairs)
        elif objective == "multi:softprob":
            ncl = len(js_trees) // params['n_estimators']
            container.add_node(op_name,
                               operator.input_full_names,
                               operator.output_full_names,
                               name=scope.get_unique_operator_name(op_name),
                               op_domain='ai.onnx.ml',
                               op_version=1,
                               **attr_pairs)
        elif objective == "reg:logistic":
            ncl = len(js_trees) // params['n_estimators']
            if ncl == 1:
                ncl = 2
            container.add_node(op_name,
                               operator.input_full_names,
                               operator.output_full_names,
                               name=scope.get_unique_operator_name(op_name),
                               op_domain='ai.onnx.ml',
                               op_version=1,
                               **attr_pairs)
        else:
            raise RuntimeError(  # pragma: no cover
                "Unexpected objective: {0}".format(objective))
Example #44
def stats_cb(stats_json_str):
    stats_json = json.loads(stats_json_str)
    print('\nKAFKA Stats: {}\n'.format(pformat(stats_json)))
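This callback signature matches what confluent-kafka clients invoke when statistics emission is enabled. A hedged sketch of the wiring (assumes the confluent-kafka package; config keys follow librdkafka's documentation, so verify them against your client version):

import json
from pprint import pformat

from confluent_kafka import Consumer

def stats_cb(stats_json_str):
    stats_json = json.loads(stats_json_str)
    print('\nKAFKA Stats: {}\n'.format(pformat(stats_json)))

consumer = Consumer({
    'bootstrap.servers': 'localhost:9092',  # placeholder broker
    'group.id': 'demo-group',
    'statistics.interval.ms': 15000,        # emit stats every 15 s; 0 disables
    'stats_cb': stats_cb,
})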
Example #45
    def step(self, action_dict):
        """
        Returns observations from ready agents.

        The returns are dicts mapping from agent_id strings to values. The
        number of agents in the env can vary over time.

        Returns
        -------
            obs (dict): New observations for each ready agent.
            rewards (dict): Reward values for each ready agent. If the
                episode is just started, the value will be None.
            dones (dict): Done values for each ready agent. The special key
                "__all__" (required) is used to indicate env termination.
            infos (dict): Optional info values for each agent id.
        """
        self.resetted = False
        self.steps += 1
        logger.debug('====> [SUMOTestMultiAgentEnv:step] Episode: %d - Step: %d <====',
                     self.episodes, self.steps)
        dones = {}
        dones['__all__'] = False

        shuffled_agents = sorted(action_dict.keys()) # it may seem not smart to sort something that
                                                     # may need to be shuffled afterwards, but it
                                                     # is a matter of consistency instead of using
                                                     # whatever insertion order was used in the dict
        if self._config['scenario_config']['agent_rnd_order']:
            ## randomize the agent order to minimize SUMO's insertion queues impact
            logger.debug('Shuffling the order of the agents.')
            self.rndgen.shuffle(shuffled_agents) # in-place shuffle

        # Take action
        for agent in shuffled_agents:
            self.agents[agent].step(action_dict[agent], self.simulation)

        logger.debug('Before SUMO')
        ongoing_simulation = self.simulation.step(until_end=False, agents=set(action_dict.keys()))
        logger.debug('After SUMO')

        ## end of the episode
        if not ongoing_simulation:
            logger.info('Reached the end of the SUMO simulation.')
            dones['__all__'] = True

        obs, rewards, infos = {}, {}, {}

        for agent in action_dict:
            ## check for collisions
            if self.simulation.collisions[agent] > 0:
                # punish the agent and remove it from the simulation
                dones[agent] = True
                obs[agent] = [0, 0]
                rewards[agent] = - self.agents[agent].config['max_speed']
                # infos[agent] = 'Collision'
                self.simulation.traci_handler.remove(agent, reason=tc.REMOVE_VAPORIZED)
            else:
                dones[agent] = agent not in self.simulation.veh_subscriptions
                obs[agent] = self.get_observation(agent)
                rewards[agent] = self.get_reward(agent)
                # infos[agent] = ''

        logger.debug('Observations: %s', pformat(obs))
        logger.debug('Rewards: %s', pformat(rewards))
        logger.debug('Dones: %s', pformat(dones))
        logger.debug('Info: %s', pformat(infos))
        logger.debug('========================================================')
        return obs, rewards, dones, infos
Example #46
def GetCompletions_IncludeMultiFileType_test(app):
    trivial1 = {
        'filetypes': ['python', 'javascript'],
        'contents': ReadFile(PathToTestFile('trivial.js')),
    }
    trivial2 = {
        'filetypes': ['javascript'],
        'contents': ReadFile(PathToTestFile('trivial2.js')),
    }

    request = {
        'line_num': 1,
        'column_num': 3,
        'file_data': {
            PathToTestFile('trivial.js'): trivial1,
            PathToTestFile('trivial2.js'): trivial2,
        },
    }

    app.post_json(
        '/event_notification',
        CombineRequest(
            request, {
                'filepath': PathToTestFile('trivial2.js'),
                'event_name': 'FileReadyToParse',
            }))

    response = app.post_json(
        '/completions',
        CombineRequest(
            request,
            {
                'filepath': PathToTestFile('trivial2.js'),
                # We must force the use of semantic engine because the previous test would
                # have entered 'empty' results into the completion cache.
                'force_semantic': True,
            })).json

    print('completer response: {0}'.format(pformat(response, indent=2)))

    assert_that(
        response,
        has_entries({
            'completion_start_column':
            3,
            # Note: This time, we *do* see the completions, because one of the 2
            # filetypes for trivial.js is javascript.
            'completions':
            contains_inanyorder(
                CompletionEntryMatcher('y', 'string'),
                CompletionEntryMatcher('z', 'string'),
                CompletionEntryMatcher('toString', 'fn() -> string'),
                CompletionEntryMatcher('toLocaleString', 'fn() -> string'),
                CompletionEntryMatcher('valueOf', 'fn() -> number'),
                CompletionEntryMatcher('hasOwnProperty',
                                       'fn(prop: string) -> bool'),
                CompletionEntryMatcher('isPrototypeOf', 'fn(obj: ?) -> bool'),
                CompletionEntryMatcher('propertyIsEnumerable',
                                       'fn(prop: string) -> bool'),
            ),
            'errors':
            empty(),
        }))
Example #47
    def on_msg_error(self, msg, cell, content):
        self.log.error("ERROR\n---\n%s", pformat(msg))
Example #48
    def send(self, url, message_data, **kwargs):
        logger.info(pprint.pformat(message_data, indent=4))
Example #49
def capture_thread(q, cameras):
    '''Manage the capture image process'''
    config = Scribe3Configuration()
    sound = SoundLoader.load('camera_shoot.wav')
    sound_delay = config.get('sound_delay', 0)
    has_single_camera = cameras.get_num_cameras() == 1

    if not config.is_true('stats_disabled'):
        from ia_scribe.breadcrumbs.api import log_event

    def psound(sound):
        try:
            Logger.info('Camera shoot sound delay is {}s'
                        .format(sound_delay))
            try:
                if float(sound_delay) < 0:
                    Logger.debug('ScribeWidget: Negative value in config '
                                 'for sound_delay. Camera shoot sound is '
                                 'disabled')
                    return
            except Exception:
                pass
            sound.seek(0)
            time.sleep(float(sound_delay))
            sound.play()
        except Exception as e:
            Logger.exception('Failed to play camera shoot '
                             'sound ({})'.format(e))

    Logger.info('Starting capture thread with Queue {} '
                .format(q))

    myfile = None
    clog = False
    if config.is_true('camera_logging'):
        myfile = open('/tmp/tts_shoot_times.csv', 'a')
        clog = True

    Logger.info('Camera logging {} with file {}'
                .format(clog, myfile))

    while True:
        camera_kwargs = q.get()
        Logger.info(
            'Capturing image using kwargs: {}{}'
            .format(os.linesep, pformat(camera_kwargs))
        )
        camera_side = camera_kwargs[camera_system.KEY_SIDE]
        path = camera_kwargs[camera_system.KEY_PATH]
        thumb_path = camera_kwargs[camera_system.KEY_THUMB_PATH]
        callback = camera_kwargs[camera_system.KEY_CALLBACK]
        stats = {}
        capture_error = None
        if camera_side == 'left':
            angle = 90
        elif camera_side == 'right':
            angle = -90
        else:
            angle = - config.get_integer('default_single_camera_rotation',180) \
                        if has_single_camera else 0

        port = cameras.get_camera_port(camera_side)
        Logger.info(
            'ScribeWidget: Capturing side {s} with '
            'port {p}, angle {a}, path {path} and fake {fake}'
            .format(s=camera_side, p=port, a=angle, path=path,
                    fake=fake_cameras)
        )
        camera_start = time.time()
        # Move this in try catch block when debug is finished

        Logger.info('ScribeWidget: Playing camera shoot sound')
        _thread.start_new_thread(psound, (sound,))
        Logger.info('Sound dispatched, now calling camera.shoot()')
        output = cameras.take_shot(camera_side, path)

        if type(output) is subprocess.CalledProcessError:
            capture_error = 'gphoto error: {}'.format(output.returncode)
            Logger.exception('ScribeWidget: Camera shoot failed')
        elif type(output) is Exception:
            capture_error = 'generic gphoto error'
            Logger.exception('ScribeWidget: Camera shoot failed with error {}'.format(output))

        stats['capture_time'] = time.time() - camera_start
        Logger.info('ScribeWidget: Camera took {t:.2f}s'
                    .format(t=stats['capture_time']))

        # Having a camera powered off won't cause an exception, so check
        # to see if the image was created
        if not os.path.exists(path):
            Logger.error('ScribeWidget: Camera failed to capture an image')
            capture_error = 'gphoto error'

        Logger.info('ScribeWidget: Generating thumbs...')
        thumb_start = time.time()
        if capture_error is None:
            try:
                size = (1500, 1000)  # (6000,4000)/4
                if config.is_true('low_res_proxies'):
                    size = (750, 500)  # (6000,4000)/8

                # size = (3000, 2000) # (6000,4000)/2
                image = Image.open(path)
                image.thumbnail(size)
                image = image.rotate(angle, expand=True)

                if fake_cameras is not False:
                    Logger.info('ScribeWidget: fake_cameras is {} '
                                '(not False) -> Drawing debug output'
                                .format(fake_cameras))
                    draw = ImageDraw.Draw(image)
                    font_color = (0, 0, 0)
                    font_filename = join(scribe_globals.FONTS_DIR,
                                         scribe_globals.UTF8_FONT + '.ttf')
                    font = ImageFont.truetype(font_filename, 50)
                    draw.text((0, 0),
                              os.path.basename(path),
                              font_color, font=font)
                    if angle % 360 == 0 or angle % 360 == 180:
                        height = size[1]
                    else:
                        height = size[0]
                    draw.text((0, height - 200),
                              '{} image'.format(camera_side),
                              fill=font_color,
                              font=font)
                    draw.text((0, height - 400),
                              'port {}'.format(port),
                              fill=font_color,
                              font=font)

                image.save(thumb_path, 'JPEG', quality=90)
            except Exception as e:
                capture_error = 'thumbnail error' + str(e)
                Logger.exception(
                    'ScribeWidget: Failed to create thumbnail: {}'
                    .format(thumb_path)
                )

        stats['thumb_time'] = time.time() - thumb_start
        Logger.info(
            'ScribeWidget: Generated thumbnail at '
            '{thumb_path}. Took {t:.2f}s'
            .format(t=stats['thumb_time'],
                    thumb_path=thumb_path)
        )

        if clog:
            ctime = '{t:.2f}s'.format(t=stats['capture_time'])
            ttime = '{t:.2f}s'.format(t=stats['thumb_time'])
            Logger.info(
                'ScribeWidget: Writing ctime = {ctime}, '
                'ttime = {ttime} to stats file {path}'
                .format(ttime=ttime, ctime=ctime, path=myfile.name)
            )
            lineout = (
                '{0},{1},{2},{3},{4},{5},{6},{7}\n'
                .format(time.time(), camera_side, port, angle, ctime,
                        ttime, path, threading.current_thread().ident)
            )
            myfile.write(lineout)

        report = {camera_system.KEY_SIDE: camera_side,
                  camera_system.KEY_THUMB_PATH: thumb_path,
                  camera_system.KEY_ERROR: capture_error,
                  camera_system.KEY_STATS: stats}

        if config.is_true('enable_camera_telemetry'):
            metrics_payload = {camera_system.KEY_SIDE: camera_side,
                               camera_system.KEY_ERROR: capture_error,
                               'angle': angle,
                               'thumb_time': stats['thumb_time'],
                               'path': path}
            put_metric('scribe3.camera.capture_time',
                       stats['capture_time'], metrics_payload)

        if not config.is_true('stats_disabled'):
            log_event('camera', 'capture_time', stats['capture_time'], camera_side)

        widget = camera_kwargs.get(camera_system.KEY_IMAGE_WIDGET, None)
        if widget:
            report[camera_system.KEY_IMAGE_WIDGET] = widget
        extra = camera_kwargs.get(camera_system.KEY_EXTRA, None)
        if extra is not None:
            report[camera_system.KEY_EXTRA] = extra
        if callback is not None:
            Clock.schedule_once(partial(callback, report))
        q.task_done()
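
The thumbnail stage above is the heart of this snippet: Image.thumbnail() downscales in place while preserving aspect ratio, rotate(angle, expand=True) compensates for the camera's mounting angle without cropping, and the result is saved as a JPEG proxy. A minimal standalone sketch of that sequence, assuming Pillow is installed; the paths, angle, and size here are placeholders, not values from the original:

from PIL import Image

def make_thumb(src='source.jpg', dst='thumb.jpg', angle=90, size=(1500, 1000)):
    # thumbnail() resizes in place, fitting the image within `size`
    image = Image.open(src)
    image.thumbnail(size)
    # expand=True grows the canvas so 90/270-degree rotations are not cropped
    image = image.rotate(angle, expand=True)
    image.save(dst, 'JPEG', quality=90)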
Example #50
0
def build_search(
        path, name, distributions, config, n_repeats, n_param_settings=None,
        _zip=True, add_date=0, do_local_test=True, readme=""):
    """ Create a job implementing a hyper-parameter search.

    Parameters
    ----------
    path: str
        Path to the directory where the search archive will be saved.
    name: str
        Name for the search.
    distributions: dict (str -> (list or distribution))
        Distributions to sample from. Can also be a list of samples.
    config: Config instance
        The base configuration.
    n_repeats: int
        Number of different random seeds to run each sample with.
    n_param_settings: int
        Number of parameter settings to sample. If not supplied, all
        possibilities are generated.
    _zip: bool
        Whether to zip the created search directory.
    add_date: bool
        Whether to add the current date/time to the experiment directory name.
    do_local_test: bool
        If True, run a short test using one of the sampled
        configs on the local machine to catch any dumb errors
        before starting the real experiment.
    readme: str
        String specifying the context/purpose of the search.

    """
    if config.get('seed', None) is None:
        config.seed = gen_seed()

    with NumpySeed(config.seed):
        es = ExperimentStore(path, prefix="build_search")

        count = 0
        base_name = name
        has_built = False
        while not has_built:
            try:
                exp_dir = es.new_experiment(
                    name, config.seed, add_date=add_date, force_fresh=1)
                has_built = True
            except FileExistsError:
                name = "{}_{}".format(base_name, count)
                count += 1

        if readme:
            with open(exp_dir.path_for('README.md'), 'w') as f:
                f.write(readme)

        print(config)
        exp_dir.record_environment(config=config)

        print("Building parameter search at {}.".format(exp_dir.path))

        job = Job(exp_dir.path)

        new_configs = sample_configs(distributions, n_repeats, n_param_settings)

        with open(exp_dir.path_for("sampled_configs.txt"), "w") as f:
            f.write("\n".join("idx={}: {}".format(config["idx"], pformat(config)) for config in new_configs))

        print("{} configs were sampled for parameter search.".format(len(new_configs)))

        if do_local_test:
            print("\nStarting local test " + ("=" * 80))
            test_config = new_configs[0].copy()
            test_config.update(max_steps=1000, render_hook=None)
            _RunTrainingLoop(config)(test_config)
            print("Done local test " + ("=" * 80) + "\n")

        job.map(_RunTrainingLoop(config.copy()), new_configs)

        job.save_object('metadata', 'distributions', distributions)
        job.save_object('metadata', 'config', config)

        print(job.summary())

        if _zip:
            path = job.zip(delete=True)
            print("Zipped {} as {}.".format(exp_dir.path, path))
        else:
            path = exp_dir.path

        return path
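
For illustration only, a hedged sketch of calling build_search; the Config constructor, hyper-parameter names, and paths are hypothetical stand-ins, not values from the original project:

# Hypothetical invocation, assuming a Config class as described in the docstring.
config = Config(lr=1e-3, batch_size=32)
distributions = {
    'lr': [1e-4, 1e-3, 1e-2],    # a plain list is treated as explicit samples
    'batch_size': [16, 32, 64],
}
archive_path = build_search(
    '/tmp/searches', 'lr_sweep', distributions, config,
    n_repeats=3, n_param_settings=6, do_local_test=False,
    readme='Quick learning-rate sweep.')
print('Search archive written to', archive_path)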
Example #51
0
def _print_attrs(obj, *args, join="\n  "):
    import pprint
    attr_list = ['{} = {}'.format(i, pprint.pformat(getattr(obj, i)))
                 for i in sorted(args)]
    split = ',\n'.join(attr_list).split('\n')
    return join + join.join(split)
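
As a usage illustration (the DummyCfg class is invented for this demo), the helper returns each requested attribute pretty-printed on its own indented line, sorted by name:

class DummyCfg:
    def __init__(self):
        self.name = 'demo'
        self.sizes = list(range(12))

# Attribute order in the output is sorted, regardless of call order.
print(_print_attrs(DummyCfg(), 'sizes', 'name'))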
Example #52
0
    def __str__(self):
        return pprint.pformat(self.marshal())
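
A hedged sketch of the same pattern (the Marshalable class and its marshal() method are invented for this demo): any object that can dump its state to a dict gets a readable print() for free.

import pprint

class Marshalable:
    def __init__(self):
        self.state = {'id': 7, 'tags': ['a', 'b']}

    def marshal(self):
        # Return a plain-dict view of the object's state.
        return self.state

    def __str__(self):
        return pprint.pformat(self.marshal())

print(Marshalable())  # prints the pretty-printed dict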
Example #53
0
    def save_data(self, name, value):
        os.makedirs(self.config.base.save_path, exist_ok=True)
        path = os.path.join(self.config.base.save_path, f'{name}.dat')
        data = pprint.pformat(value, width=1)
        with open(path, 'w') as fp:
            fp.write(data)
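
A standalone round-trip sketch of the same idea (the value below is made up): pformat(width=1) breaks containers onto one element per line, and because the output is literal Python syntax it can usually be parsed back with ast.literal_eval, provided the data contains only literals:

import ast
import pprint

value = {'accuracy': 0.91, 'epochs': [1, 2, 3]}
text = pprint.pformat(value, width=1)
print(text)                              # one key/element per line
assert ast.literal_eval(text) == value   # round-trips for literal-only data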
Example #54
0
    print("-" * 50)
    msg = (
        "Connecting to '%s' database on %s server at '%s'" %
        (sqlalchemy_db_url.database,
         sqlalchemy_db_url.get_dialect().name.upper(), sqlalchemy_db_url.host))
    print(msg)
    engine = create_engine(db_url)
    Base.metadata.bind = engine
    # Base.metadata.bind.echo = True   # Log the SQL interaction.
    msg = (engine.execute("select 'Connected to %s database'" %
                          sqlalchemy_db_url.database).scalar())
    print(msg)
    msg = ("Established connection to '%s' database" %
           sqlalchemy_db_url.database)
    print(msg)

    print("-" * 50)
    print("Starting Alembic upgrade database to latest version")
    command.upgrade(alembic_conf, "head", tag=db_url)
    print("Completed running Alembic command")
    print("-" * 50)


if __name__ == "__main__":
    import pprint
    setup()
    dummy_query = session.query(Device).limit(10)
    print("Sample list of  Device")
    print(pprint.pformat(dummy_query.all()))
Example #55
0
    def __repr__(self):
        return pprint.pformat(self.storage)
Example #56
0
import pprint

import bag

cell_name_list = [
    'sarclkdelay', 'sarclkdelay_compact', 'sarclkdelay_compact_dual'
]
impl_lib = 'adc_sar_generated'
#tb_lib = 'adc_sar_testbenches'
#tb_cell = 'sarclkdelay_tb_tran'

params = dict(
    lch=16e-9,
    pw=4,
    nw=4,
    m=1,
    device_intent='fast',
)
#generate_layout = False
#extract_layout = False

print('creating BAG project')
prj = bag.BagProject()

for cell_name in cell_name_list:
    # create design module and run design method.
    print('designing module')
    dsn = prj.create_design_module(lib_name, cell_name)  # lib_name is assumed defined earlier in the original script
    print('design parameters:\n%s' % pprint.pformat(params))
    dsn.design(**params)

    # implement the design
    print('implementing design with library %s' % impl_lib)
    dsn.implement_design(impl_lib, top_cell_name=cell_name, erase=True)
Example #57
0
if __name__ == '__main__':
  import doctest
  doctest.testmod( )

  import sys
  port = None
  port = sys.argv[1:] and sys.argv[1] or False
  serial_num = sys.argv[2:] and sys.argv[2] or False
  if not port or not serial_num:
    print "usage:\n%s <port> <serial>, eg /dev/ttyUSB0 208850" % sys.argv[0]
    sys.exit(1)
  import link
  import stick
  import session
  from pprint import pformat
  logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
  log.info("howdy! I'm going to take a look at your pump download something info.")
  stick = stick.Stick(link.Link(port, timeout=.400))
  stick.open( )
  session = session.Pump(stick, serial_num)
  log.info(pformat(stick.interface_stats( )))

  downloader = Downloader(stick, session)
  downloader.download( )

  downloader = PageDownloader(stick, session)
  downloader.download( )

  log.info(pformat(stick.interface_stats( )))
  log.info("howdy! we downloaded everything.")
Example #58
0
def corr3(config, logger=None):
    """Run the full three-point correlation function code based on the parameters in the
    given config dict.

    The function print_corr3_params() will output information about the valid parameters
    that are expected to be in the config dict.

    Optionally, a logger parameter may be given, in which case it is used for logging.
    If not given, logging is configured from the verbose and log_file parameters.

    :param config:  The configuration dict which defines what to do.
    :param logger:  If desired, a logger object for logging. (default: None, in which case
                    one will be built according to the config dict's verbose level.)
    """
    # Setup logger based on config verbose value
    if logger is None:
        logger = treecorr.config.setup_logger(
                treecorr.config.get(config,'verbose',int,1),
                config.get('log_file',None))

    # Check that config doesn't have any extra parameters.
    # (Such values are probably typos.)
    # Also convert the given parameters to the correct type, etc.
    config = treecorr.config.check_config(config, corr3_valid_params, corr3_aliases, logger)

    import pprint
    logger.debug('Using configuration dict:\n%s',pprint.pformat(config))

    if ( 'output_dots' not in config 
          and config.get('log_file',None) is None 
          and config['verbose'] >= 2 ):
        config['output_dots'] = True

    # Set the number of threads
    num_threads = config.get('num_threads',None)
    logger.debug('From config dict, num_threads = %s',num_threads)
    treecorr.set_omp_threads(num_threads, logger)

    # Read in the input files.  Each of these is a list.
    cat1 = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, logger)
    if len(cat1) == 0:
        raise AttributeError("Either file_name or file_list is required")
    cat2 = treecorr.read_catalogs(config, 'file_name2', 'rand_file_list2', 1, logger)
    cat3 = treecorr.read_catalogs(config, 'file_name3', 'rand_file_list3', 1, logger)
    rand1 = treecorr.read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger)
    rand2 = treecorr.read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger)
    rand3 = treecorr.read_catalogs(config, 'rand_file_name3', 'rand_file_list3', 1, logger)
    if len(cat2) == 0 and len(rand2) > 0:
        raise AttributeError("rand_file_name2 is invalid without file_name2")
    if len(cat3) == 0 and len(rand3) > 0:
        raise AttributeError("rand_file_name3 is invalid without file_name3")
    logger.info("Done reading input catalogs")

    # Do GGG correlation function if necessary
    if 'ggg_file_name' in config: #or 'm3_file_name' in config:
        logger.warning("Performing GGG calculations...")
        ggg = treecorr.GGGCorrelation(config,logger)
        ggg.process(cat1,cat2,cat3)
        logger.info("Done GGG calculations.")
        if 'ggg_file_name' in config:
            ggg.write(config['ggg_file_name'])
        if 'm3_file_name' in config:
            ggg.writeMapSq(config['m3_file_name'])

    # Do NNN correlation function if necessary
    if 'nnn_file_name' in config:
        if len(rand1) == 0:
            raise AttributeError("rand_file_name is required for NNN correlation")
        if len(cat2) > 0 and len(rand2) == 0:
            raise AttributeError("rand_file_name2 is required for NNN cross-correlation")
        if len(cat3) > 0 and len(rand3) == 0:
            raise AttributeError("rand_file_name3 is required for NNN cross-correlation")
        if (len(cat2) > 0) != (len(cat3) > 0):
            raise NotImplementedError(
                "Cannot yet handle 3-point corrleations with only two catalogs. "+
                "Need both cat2 and cat3.")
        logger.warning("Performing DDD calculations...")
        ddd = treecorr.NNNCorrelation(config,logger)
        ddd.process(cat1,cat2,cat3)
        logger.info("Done DDD calculations.")

        if len(cat2) == 0:
            logger.warning("Performing RRR calculations...")
            rrr = treecorr.NNNCorrelation(config,logger)
            rrr.process(rand1)
            logger.info("Done RRR calculations.")

            # For the next step, just make cat2 = cat3 = cat1 and rand2 = rand3 = rand1.
            cat2 = cat3 = cat1
            rand2 = rand3 = rand1
        else:
            logger.warning("Performing RRR calculations...")
            rrr = treecorr.NNNCorrelation(config,logger)
            rrr.process(rand1,rand2,rand3)
            logger.info("Done RRR calculations.")

        if config['nnn_statistic'] == 'compensated':
            logger.warning("Performing DRR calculations...")
            drr = treecorr.NNNCorrelation(config,logger)
            drr.process(cat1,rand2,rand3)
            logger.info("Done DRR calculations.")
            logger.warning("Performing DDR calculations...")
            ddr = treecorr.NNNCorrelation(config,logger)
            ddr.process(cat1,cat2,rand3)
            logger.info("Done DDR calculations.")
            logger.warning("Performing RDR calculations...")
            rdr = treecorr.NNNCorrelation(config,logger)
            rdr.process(rand1,cat2,rand3)
            logger.info("Done RDR calculations.")
            logger.warning("Performing RRD calculations...")
            rrd = treecorr.NNNCorrelation(config,logger)
            rrd.process(rand1,rand2,cat3)
            logger.info("Done RRD calculations.")
            logger.warning("Performing DRD calculations...")
            drd = treecorr.NNNCorrelation(config,logger)
            drd.process(cat1,rand2,cat3)
            logger.info("Done DRD calculations.")
            logger.warning("Performing RDD calculations...")
            rdd = treecorr.NNNCorrelation(config,logger)
            rdd.process(rand1,cat2,cat3)
            logger.info("Done RDD calculations.")
            ddd.write(config['nnn_file_name'],rrr,drr,rdr,rrd,ddr,drd,rdd)
        else:
            ddd.write(config['nnn_file_name'],rrr)

    # Do KKK correlation function if necessary
    if 'kkk_file_name' in config:
        logger.warning("Performing KKK calculations...")
        kkk = treecorr.KKKCorrelation(config,logger)
        kkk.process(cat1,cat2,cat3)
        logger.info("Done KKK calculations.")
        kkk.write(config['kkk_file_name'])

    # Do NNG correlation function if necessary
    if False:
    #if 'nng_file_name' in config or 'nnm_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for nng correlation")
        logger.warning("Performing NNG calculations...")
        nng = treecorr.NNGCorrelation(config,logger)
        nng.process(cat1,cat2,cat3)
        logger.info("Done NNG calculation.")

        # The default ng_statistic is compensated _iff_ rand files are given.
        rrg = None
        if len(rand1) == 0:
            if config.get('nng_statistic',None) == 'compensated':
                raise AttributeError("rand_files is required for nng_statistic = compensated")
        elif config.get('nng_statistic','compensated') == 'compensated':
            rrg = treecorr.NNGCorrelation(config,logger)
            rrg.process(rand1,rand1,cat2)
            logger.info("Done RRG calculation.")

        if 'nng_file_name' in config:
            nng.write(config['nng_file_name'], rrg)
        if 'nnm_file_name' in config:
            nng.writeNNMap(config['nnm_file_name'], rrg)


    # Do NNK correlation function if necessary
    if False:
    #if 'nnk_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for nnk correlation")
        logger.warning("Performing NNK calculations...")
        nnk = treecorr.NNKCorrelation(config,logger)
        nnk.process(cat1,cat2,cat3)
        logger.info("Done NNK calculation.")

        rrk = None
        if len(rand1) == 0:
            if config.get('nnk_statistic',None) == 'compensated':
                raise AttributeError("rand_files is required for nnk_statistic = compensated")
        elif config.get('nnk_statistic','compensated') == 'compensated':
            rrk = treecorr.NNKCorrelation(config,logger)
            rrk.process(rand1,rand1,cat2)
            logger.info("Done RRK calculation.")

        nnk.write(config['nnk_file_name'], rrk)

    # Do KKG correlation function if necessary
    if False:
    #if 'kkg_file_name' in config:
        if len(cat3) == 0:
            raise AttributeError("file_name3 is required for kkg correlation")
        logger.warning("Performing KKG calculations...")
        kkg = treecorr.KKGCorrelation(config,logger)
        kkg.process(cat1,cat2,cat3)
        logger.info("Done KKG calculation.")
        kkg.write(config['kkg_file_name'])
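
As an illustrative, hypothetical invocation: corr3 is driven entirely by the config dict, so a minimal NNN run might look like the sketch below, assuming data.fits and rand.fits exist with whatever position columns TreeCorr expects for them:

config = {
    'file_name': 'data.fits',        # hypothetical data catalog
    'rand_file_name': 'rand.fits',   # randoms, required for NNN
    'nnn_file_name': 'nnn_out.txt',  # output file for the correlation
    'min_sep': 1.0,
    'max_sep': 50.0,
    'nbins': 10,
    'verbose': 2,
}
corr3(config)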
Example #59
0
    def check_events(self, expected):
        events = self.get_events_wotime()
        if events != expected:
            self.fail(
                "events did not match expectation; got:\n%s\nexpected:\n%s"
                % (pprint.pformat(events), pprint.pformat(expected)))
Example #60
0
def get_server_capabilities(args):
    global blue_comm
    for anim in constants.SERVER_CAPABILITIES:
        # Build the message once so the printed and sent payloads match.
        message = "Cap:" + anim + ":" + pprint.pformat(constants.SERVER_CAPABILITIES[anim])
        print(message)
        time.sleep(0.075)
        blue_comm.send_comm(message)