Example #1
import sys
import traceback


def get_traceback():
    """
    If an exception is being handled, return its formatted traceback;
    otherwise return a formatted trace of the current call stack.
    """
    frames = traceback.format_tb(sys.exc_info()[2])
    if frames:
        traceback_pretty = "Traceback: \n%s" % "\n".join(frames)
    else:
        # force traceback except for this call
        stack = traceback.extract_stack()[:-1]
        traceback_pretty = "%sForced traceback (most recent call last)" % \
            (' ' * 4, )
        for trace_tuple in stack:
            traceback_pretty += """
    File "%(file)s", line %(line)s, in %(function)s
        %(text)s""" % {
                'file': trace_tuple[0],
                'line': trace_tuple[1],
                'function': trace_tuple[2],
                'text': str(trace_tuple[3]) if trace_tuple[3] is not None else ""
            }
    return traceback_pretty
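A minimal usage sketch appended to the example above (the boom() helper is illustrative, not from the source): inside an except block the function formats the exception being handled; elsewhere format_tb(None) returns an empty list, so the forced-stack branch runs.

def boom():
    raise ValueError("demo")

try:
    boom()
except ValueError:
    print(get_traceback())  # "Traceback: ..." for the exception being handled

print(get_traceback())      # falls back to "Forced traceback (most recent call last)"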
Example #2
    def handle_read(self):
        # debug
        self.printd("handle_read")

        try:
            # read from my socket, printd it, and send to my peer
            data = self.recv(4096)
        except Exception:
            exc_type, exc_value, exc_tb = sys.exc_info()
            self.printd("EXCEPTION in recv: value=%s traceback=\n%s\n"
                        % (exc_value, ''.join(format_tb(exc_tb))))
            data = ""
        else: 
            # debug
            self.printd("handle_read: len(data)=%d" % (len(data),))
        
        if len(data) == 0:
            self.printd("CLOSED on read")
            self.close()

        # if I've already connected to the remote server, write immediately
        if self.peer:
            self.peer.write(data)
            return

        # try to parse a complete HTTP header
        self.header_buf += data
        hdr_match = http_header_re.match(self.header_buf)
        if not hdr_match:
            # not a complete header yet, keep reading
            if len(self.header_buf) < self.header_max:
                return

            # error! too much data without an HTTP header. Barf!
            self.close()
            return

        # debug
        self.printd("handle_read: self.header_buf=[%s]" % (self.header_buf,))

        try:
            req = HttpReq()
            self.request = req
            
            # parse the HTTP header and find the remote server
            headers = hdr_match.group(1)
            (req.reqline, headers) = fixlen(2, re.split(r'\r?\n', headers, 1))
            req.headers = HeaderParser().parsestr(headers)

            (req.method, req.path, req.version) = fixlen(3, req.reqline.split(None, 3))

            # debug
            self.printd('req=%s' % (req,))
            
        except Exception:
            # debug
            exc_type, exc_value, exc_tb = sys.exc_info()
            self.printd("EXCEPTION in handle_read: value=%s traceback=\n%s\n"
                        % (exc_value, ''.join(format_tb(exc_tb))))

        # my parent can decide where to redirect this client to (by calling connect_server)
        self.parent.handle_client(self)
Example #3
    def _skip_node_accident_vm_recovery(self, uuid, vm_state):
        # Initialize status.
        status = self.STATUS_NORMAL

        try:
            self.rc_util_api.do_instance_reset(uuid, 'error')

            msg = "Skipped recovery. " \
                  "instance_id:%s, " \
                  "accident type: [node accident]." % (uuid)
            LOG.info(self.rc_util.msg_with_thread_id(msg))

        except EnvironmentError:
            status = self.STATUS_ERROR
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(self.rc_util.msg_with_thread_id(error_type))
            LOG.error(self.rc_util.msg_with_thread_id(error_value))
            for tb in tb_list:
                LOG.error(self.rc_util.msg_with_thread_id(tb))
        except:
            status = self.STATUS_ERROR
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(self.rc_util.msg_with_thread_id(error_type))
            LOG.error(self.rc_util.msg_with_thread_id(error_value))
            for tb in tb_list:
                LOG.error(self.rc_util.msg_with_thread_id(tb))

        return status
Example #4
    def host_maintenance_mode(self, notification_id, hostname,
                              update_progress):
        """
           nova-compute service change to disable or enable.
           :param notification_id: Notification ID included in the notification
           :param hostname: Host name of brocade target
        """
        try:
            self.rc_config.set_request_context()
            db_engine = dbapi.get_engine(self.rc_config)
            session = dbapi.get_session(db_engine)
            self.rc_util_api.disable_host_status(hostname)

            if update_progress is True:
                self.rc_util_db.update_notification_list_db(
                    session,
                    'progress', 2, notification_id)

        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            return
        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            return
Example #5
 def crawl(site, spider, HeadlineSourceID):
     headline = HeadLine()
     try:
         if site["type"] == "regex":
             headline = spider.crawl_link_page_regex(site, headline)
         else:
             headline = spider.crawl_link_page_xpath(site, headline)
         console_msg = "%s %s; source: %s" % (tt.format_time(), headline.title, headline.source)
         print(console_msg)
         return headline
     except ERROR.TimeoutError as e:
         logging.critical("Http Request Timeout: id - " + HeadlineSourceID)
         exc_type, exc_value, exc_traceback = sys.exc_info()
         traceback_str = "".join(traceback.format_tb(exc_traceback))
         logging.critical(traceback_str)
         traceback.print_exc()
     except ERROR.MatchError as e:
         logging.critical("Xpath or Regex Error: id - " + HeadlineSourceID)
         exc_type, exc_value, exc_traceback = sys.exc_info()
         traceback_str = "".join(traceback.format_tb(exc_traceback))
         logging.critical(traceback_str)
         traceback.print_exc()
     except BaseException as e:
         logging.critical("Undefined Exception: id - " + HeadlineSourceID)
         exc_type, exc_value, exc_traceback = sys.exc_info()
         traceback_str = "".join(traceback.format_tb(exc_traceback))
         logging.critical(traceback_str)
         traceback.print_exc()
Example #6
def log_uncaught_exceptions(ex_cls, ex, tb):

    logging.critical(''.join(traceback.format_tb(tb)))
    logging.critical('{0}: {1}'.format(ex_cls, ex))
    
    print(''.join(traceback.format_tb(tb)))
    print('{0}: {1}'.format(ex_cls, ex))
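Since log_uncaught_exceptions matches the sys.excepthook(type, value, traceback) signature, a typical way to install it would be the sketch below (the log file name is a placeholder, not from the source):

import logging
import sys
import traceback

logging.basicConfig(filename="crash.log")  # placeholder destination
sys.excepthook = log_uncaught_exceptions

raise RuntimeError("unhandled")  # now logged and printed instead of the default dump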
Example #7
File: test_util.py  Project: RazerM/h11
def test_LocalProtocolError():
    try:
        raise LocalProtocolError("foo")
    except LocalProtocolError as e:
        assert str(e) == "foo"
        assert e.error_status_hint == 400

    try:
        raise LocalProtocolError("foo", error_status_hint=418)
    except LocalProtocolError as e:
        assert str(e) == "foo"
        assert e.error_status_hint == 418

    def thunk():
        raise LocalProtocolError("a", error_status_hint=420)

    try:
        try:
            thunk()
        except LocalProtocolError as exc1:
            orig_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
            exc1._reraise_as_remote_protocol_error()
    except RemoteProtocolError as exc2:
        assert type(exc2) is RemoteProtocolError
        assert exc2.args == ("a",)
        assert exc2.error_status_hint == 420
        new_traceback = "".join(traceback.format_tb(sys.exc_info()[2]))
        assert new_traceback.endswith(orig_traceback)
Example #8
  def init(self):
    try:
      if not seiscomp3.Client.Application.init(self): return False

      try:
        self.eventID = self.commandline().optionString("event")
      except:
        sys.stderr.write("An eventID is mandatory\n")
        return False

      try:
        self.margin = self.commandline().optionInt("margin")
      except: pass

      try:
        self.streams = self.commandline().optionString("streams").split(",")
      except: pass

      try:
        self.allComponents = self.commandline().optionInt("all-components") != 0
      except: pass

      try:
        self.allLocations = self.commandline().optionInt("all-locations") != 0
      except: pass

      return True
    except:
      cla, exc, trbk = sys.exc_info()
      print(cla.__name__)
      print(exc.args)
      print(''.join(traceback.format_tb(trbk, 5)))
Example #9
File: async.py  Project: chrislaing/dask
def execute_task(key, task_info, dumps, loads, get_id, raise_on_exception=False):
    """
    Compute task and handle all administration

    See Also
    --------
    _execute_task - actually execute task
    """
    try:
        task, data = loads(task_info)
        result = _execute_task(task, data)
        id = get_id()
        result = dumps((result, None, id))
    except Exception as e:
        if raise_on_exception:
            raise
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = ''.join(traceback.format_tb(exc_traceback))
        try:
            result = dumps((e, tb, None))
        except Exception as e:
            if raise_on_exception:
                raise
            exc_type, exc_value, exc_traceback = sys.exc_info()
            tb = ''.join(traceback.format_tb(exc_traceback))
            result = dumps((e, tb, None))
    return key, result
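execute_task flattens the traceback to a string before dumps() because traceback objects are generally not serializable; a minimal sketch of the same idea, independent of dask:

import pickle
import sys
import traceback

try:
    1 / 0
except Exception as e:
    tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
    # pickle.dumps(sys.exc_info()[2]) would raise TypeError: tracebacks are not picklable
    payload = pickle.dumps((e, tb, None))

err, tb, _ = pickle.loads(payload)
print(type(err).__name__, tb)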
Example #10
    def __exit__(self, exc_type, exc_value, exc_tb):
        self.end = time()
        self.duration = self.end - self.start
        if exc_type:
            if issubclass(exc_type, SkipTest):
                self.result = 'SKIP'
            else:
                if issubclass(exc_type, AssertionError):
                    self.result = 'FAIL'
                    self.traceback = ''.join(format_tb(exc_tb)[:-1])
                else:
                    self.result = 'ERROR'
                    self.traceback = ''.join(format_tb(exc_tb))
                    self.error = '%s: %s' % (
                        exc_type.__name__, unicode(exc_value)
                    )
        else:
            self.result = 'OK'

        if hasattr(self.testcase, 'test_actions'):
            self.testcase.test_actions.append({
                "text": self.text,
                "start": self.start,
                "end": self.end,
                "duration": self.duration,
                "result": self.result,
                "error": self.error,
                "traceback": self.traceback
            })

        if self.testcase.config.verbosity > 1:
            msg = " ...%s\n" % self.result.lower()
            print_debug(self.out, msg)
Example #11
File: synctest.py  Project: bartosh/pomni
    def _marshaled_dispatch(self, data, dispatch_method = None):
        id = None
        try:
            req = simplejson.loads(data)
            method = req['method']
            params = req['params']
            id     = req['id']

            if dispatch_method is not None:
                result = dispatch_method(method, params)
            else:
                result = self._dispatch(method, params)
            response = dict(id=id, result=result, error=None)
        except:
            extpe, exv, extrc = sys.exc_info()
            err = dict(type=str(extpe),
                       message=str(exv),
                       traceback=''.join(traceback.format_tb(extrc)))
            response = dict(id=id, result=None, error=err)
        try:
            return simplejson.dumps(response)
        except:
            extpe, exv, extrc = sys.exc_info()
            err = dict(type=str(extpe),
                       message=str(exv),
                       traceback=''.join(traceback.format_tb(extrc)))
            response = dict(id=id, result=None, error=err)
            return simplejson.dumps(response)
Example #12
File: client.py  Project: cbun/narrative
    def get_assemblies(self, job_id=None, asm_id=None, stdout=False, outdir=None):
        if not job_id:
            raise NotImplementedError('Job id required')
        # Get node id
        res = requests.get('http://{}/user/{}/job/{}/assembly'.format(
                self.url, self.user, job_id), headers=self.headers)

        # Download files
        try:
            nodes_map = json.loads(res.text)
            if stdout:  # Get first one and print
                asm_file = self.shock.download_file(list(nodes_map.values())[0], outdir=outdir)
                with open(asm_file) as f:
                    for line in f:
                        print(line)
            elif asm_id:
                ordered = collections.OrderedDict(sorted(nodes_map.items()))
                node_id = list(ordered.values())[int(asm_id) - 1]
                self.shock.download_file(node_id, outdir=outdir)
            else:
                for node_id in nodes_map.values():
                    self.shock.download_file(node_id, outdir=outdir)
        except Exception:
            print(''.join(traceback.format_tb(sys.exc_info()[2])))
            print(sys.exc_info())
            raise Exception("Error retrieving results")
        return 
Example #13
    def handle_pending_instances(self):
        """
        method description.
        recovery-controller I do the recovery
        of outstanding recovery VM at startup.
        """
        try:
            self.rc_config.set_request_context()
            db_engine = dbapi.get_engine(self.rc_config)
            session = dbapi.get_session(db_engine)

            self._update_old_records_vm_list(session)
            result = self._find_reprocessing_records_vm_list(session)

            # [recover_starter]section
            recover_starter_dic = self.rc_config.get_value("recover_starter")
            semaphore_multiplicity = recover_starter_dic.get(
                "semaphore_multiplicity")

            # Set multiplicity by semaphore_multiplicity
            sem = threading.Semaphore(int(semaphore_multiplicity))

            # Execute vm_recovery_worker
            if len(result) > 0:
                # Execute the required number
                for row in result:
                    vm_uuid = row.uuid
                    primary_id = row.id
                    msg = "Run thread rc_worker.recovery_instance." \
                        + " vm_uuid=" + vm_uuid \
                        + " primary_id=" + str(primary_id)
                    LOG.info(msg)
                    thread_name = self.rc_util.make_thread_name(
                        VM_LIST, primary_id)
                    threading.Thread(
                        target=self.rc_worker.recovery_instance,
                        name=thread_name,
                        args=(vm_uuid, primary_id, sem)).start()

            # Imperfect_recover
            else:
                return

            return
        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            return
        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
            return
Example #14
    def create(self, environ, start_response, route_vars):
        """POST /: Create new Inception Cloud."""

        request_body = _read_request_body(environ)
        opt_dict = anyjson.deserialize(request_body)

        response_headers = [('Content-type', 'text/json')]
        status = '200 OK'
        result = {}

        try:
            # Copy request authorization environment to local environment
            for kw in OS_AUTH_KEYWORDS:
                os.environ[kw] = opt_dict[kw]

            ao = OrchestratorThread(opt_dict, 'create')
            ao.start()
            result = {'cloud': {'id': ao.ic.id.hex, }}
        except KeyError as ke:
            # KeyError almost certainly means the OpenStack authorization
            # environment (OS_*) wasn't provided making this a bad request
            t, v, tb = sys.exc_info()   # type, value, traceback
            status = '400 Bad Request'
            result = {'exception': {'type': t.__name__, 'value': v.args, }, }
        except Exception:
            t, v, tb = sys.exc_info()   # type, value, traceback
            print(''.join(traceback.format_tb(tb)))
            status = '500 Internal Server Error'
            result = {'exception': {'type': t.__name__, 'value': v.args, }, }
        finally:
            start_response(status, response_headers)
            return [anyjson.serialize(result)]
Example #15
def test_sync_error(loop_in_thread):
    loop = loop_in_thread
    try:
        result = sync(loop, throws, 1)
    except Exception as exc:
        f = exc
        assert 'hello' in str(exc)
        tb = get_traceback()
        L = traceback.format_tb(tb)
        assert any('throws' in line for line in L)

    def function1(x):
        return function2(x)

    def function2(x):
        return throws(x)

    try:
        result = sync(loop, function1, 1)
    except Exception as exc:
        assert 'hello' in str(exc)
        tb = get_traceback()
        L = traceback.format_tb(tb)
        assert any('function1' in line for line in L)
        assert any('function2' in line for line in L)
Example #16
  def __getExceptionString( self, lException = False, lExcInfo = False ):
    """
    Return a formatted string with exception and traceback information.
    If lExcInfo is present: full traceback.
    Elif lException is present: only the last call of the traceback.
    Else: no traceback.
    """
    if lExcInfo:
      if isinstance( lExcInfo, bool ):
        lExcInfo = sys.exc_info()
      # Get full traceback
      stack = "".join( traceback.format_tb( lExcInfo[2] ) )
    elif lException:
      # This is useless but makes pylint happy
      if not lException:
        lException = Exception()
      lExcInfo = sys.exc_info()
      try:
        args = lException.args
      except:
        return "Passed exception to the logger is not a valid Exception: %s" % str( lException )
      exceptType = lException.__class__.__name__
      value = ','.join( [str( arg ) for arg in args] )
      # Only print out last part of the traceback
      stack = traceback.format_tb( lExcInfo[2] )[-1]
    else:
      lExcInfo = sys.exc_info()
      stack = ""
    exceptType = lExcInfo[0].__name__
    value = lExcInfo[1]

    return "== EXCEPTION == %s\n%s\n%s: %s\n===============" % ( exceptType, stack, exceptType, value )
Example #17
    def _marshaled_dispatch(self, data, dispatch_method=None):

        logger.debug('data: "%s"' % data)
        id = None
        try:
            req = json.loads(data)
            method = req["method"]
            params = req["params"]
            id = req["id"]

            if dispatch_method is not None:
                result = dispatch_method(method, params)
            else:
                result = self._dispatch(method, params)
            response = dict(id=id, result=result, error=None)
        except:
            extpe, exv, extrc = sys.exc_info()
            err = dict(type=str(extpe), message=str(exv), traceback="".join(traceback.format_tb(extrc)))
            response = dict(id=id, result=None, error=err)
            logger.exception("Error while executing meth")
        try:
            return json.dumps(response)
        except:
            extpe, exv, extrc = sys.exc_info()
            err = dict(type=str(extpe), message=str(exv), traceback="".join(traceback.format_tb(extrc)))
            response = dict(id=id, result=None, error=err)
            logger.exception("Error while marschalling")
            return json.dumps(response)
Example #18
def displayTraceback(exception=None):

    global log
    import traceback

    if exception is None:
        tb = traceback.format_tb(sys.exc_info()[2])
    else:
        tb = traceback.format_tb(exception)

    if log:
        log("Traceback: " + str(sys.exc_info()[0].__name__) + ": " + str(sys.exc_info()[1]))

    for line in tb:
        if type(line) is tuple:
            xline = ""
            for item in line:
                xline += str(item) + " "
            line = xline

        line = line.strip("\n")
        if log:
            log(line)

    traceback.print_exc()
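Note that, despite the parameter name, displayTraceback() must be given a traceback object rather than an exception instance, since traceback.format_tb() walks the tb_next chain. A short usage sketch:

import sys

try:
    {}["missing"]
except KeyError:
    displayTraceback()                   # uses sys.exc_info() internally
    displayTraceback(sys.exc_info()[2])  # equivalent: pass the traceback explicitly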
Example #19
    def _get_vmha_param(self, uuid):

        try:
            # Get needed recovery information.
            sql = "SELECT recover_by, recover_to FROM vm_list " \
                  "WHERE uuid = \"%s\" " \
                  "ORDER BY create_at DESC LIMIT 1" % (uuid)

            conf_db_dic = self.rc_config.get_value('db')
            recover_data = self._do_action_db(conf_db_dic, sql)

            if recover_data is None:
                raise EnvironmentError("Failed to get recovery info.")

            # Set return values.
            recover_by = recover_data.get('recover_by')
            recover_to = recover_data.get('recover_to')

        except EnvironmentError:
            self.rc_util.syslogout_ex("RecoveryControllerWorker_0007",
                                      syslog.LOG_ERR)
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            raise EnvironmentError
        except KeyError:
            self.rc_util.syslogout_ex("RecoveryControllerWorker_0008",
                                      syslog.LOG_ERR)
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            raise KeyError
        except MySQLdb.Error:
            self.rc_util.syslogout_ex("RecoveryControllerWorker_0009",
                                      syslog.LOG_ERR)
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            raise MySQLdb.Error
        except:
            self.rc_util.syslogout_ex("RecoveryControllerWorker_0010",
                                      syslog.LOG_ERR)
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            raise

        return recover_by, recover_to
Example #20
    def handle_exception(self, exc_type, exc_value, exc_traceback):

        # exceptions from blivet_utils have 'original' traceback as part of the message
        # but we want to show the original message and traceback separately
        exc_str, tr_str = self._parse_exception(exc_value)
        if tr_str is not None:
            tr_str += "------------------------------\n"
            tr_str += "".join(traceback.format_tb(exc_traceback))
        else:
            tr_str = "".join(traceback.format_tb(exc_traceback))

        allow_report = command_exists("gnome-abrt")
        allow_ignore = self.allow_ignore and not issubclass(exc_type, errors.CommunicationError)

        if allow_ignore:
            msg = _("Unknown error occurred.\n{error}").format(error=exc_str)
        else:
            msg = _("Unknown error occurred. Blivet-gui will be terminated.\n{error}").format(error=exc_str)

        dialog = message_dialogs.ExceptionDialog(self.main_window, allow_ignore,
                                                 allow_report, msg, tr_str)
        response = dialog.run()

        if response == constants.DialogResponseType.BACK:
            return
        else:
            # restore handler and re-raise original exception
            sys.excepthook = self.excepthook
            sys.excepthook(exc_type, exc_value, exc_traceback)

            if response == constants.DialogResponseType.REPORT:
                subprocess.call(["gnome-abrt"])

            Gtk.main_quit()
Example #21
    def handle_pending_instances(self):
        """
        method description.
        recovery-controller I do the recovery
        of outstanding recovery VM at startup.
        """
        try:
            db_engine = dbapi.get_engine()
            session = dbapi.get_session(db_engine)

            self._update_old_records_vm_list(session)
            result = self._find_reprocessing_records_vm_list(session)

            # [recover_starter]section
            recover_starter_dic = self.rc_config.get_value("recover_starter")
            semaphore_multiplicity = recover_starter_dic.get("semaphore_multiplicity")

            # Set multiplicity by semaphore_multiplicity
            sem = threading.Semaphore(int(semaphore_multiplicity))

            # Execute vm_recovery_worker
            if len(result) > 0:
                # Execute the required number
                for row in result:
                    vm_uuid = row.uuid
                    primary_id = row.id
                    self.rc_util.syslogout_ex("RecoveryControllerStarter_0032", syslog.LOG_INFO)
                    msg = (
                        "Run thread rc_worker.recovery_instance."
                        + " vm_uuid="
                        + vm_uuid
                        + " primary_id="
                        + str(primary_id)
                    )
                    self.rc_util.syslogout(msg, syslog.LOG_INFO)
                    threading.Thread(target=self.rc_worker.recovery_instance, args=(vm_uuid, primary_id, sem)).start()

            # Imperfect_recover
            else:
                return

            return
        except KeyError:
            self.rc_util.syslogout_ex("RecoveryControllerStarter_0020", syslog.LOG_ERR)
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            return
        except:
            self.rc_util.syslogout_ex("RecoveryControllerStarter_0021", syslog.LOG_ERR)
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            return
Example #22
File: async.py  Project: BabeNovelty/dask
def execute_task(key, task, data, queue, get_id, raise_on_exception=False):
    """
    Compute task and handle all administration

    See also:
        _execute_task - actually execute task
    """
    try:
        result = _execute_task(task, data)
        id = get_id()
        result = key, result, None, id
    except Exception as e:
        if raise_on_exception:
            raise
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = "".join(traceback.format_tb(exc_traceback))
        result = key, e, tb, None
    try:
        queue.put(result)
    except Exception as e:
        if raise_on_exception:
            raise
        exc_type, exc_value, exc_traceback = sys.exc_info()
        tb = "".join(traceback.format_tb(exc_traceback))
        queue.put((key, e, tb, None))
Example #23
    def _skip_process_accident_vm_recovery(self, uuid, vm_state):
        # Initialize status.
        status = self.STATUS_NORMAL

        try:
            self.rc_util_api.do_instance_reset(uuid, 'error')

            # Call nova stop API.
            self.rc_util_api.do_instance_stop(uuid)

            msg = ("Skipped recovery. instance_id:%s, "
                   "accident type: [qemu process accident]." % uuid)
            LOG.info(msg)

        except EnvironmentError:
            status = self.STATUS_ERROR
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)
        except:
            status = self.STATUS_ERROR
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            LOG.error(error_type)
            LOG.error(error_value)
            for tb in tb_list:
                LOG.error(tb)

        return status
Example #24
File: Vk_bot2.py  Project: REDxEYE/VK_BOT
    def GetUserNameById(self, Id, case='nom'):

        try:
            if self.USERS.isCached(Id):
                User = self.USERS.getCache(Id)
            else:
                try:
                    User = self.DefApi.users.get(user_ids=Id, v="5.60", fields=['sex'], name_case=case)[0]
                    self.USERS.cacheUser(Id,User)
                except Exception as Ex:
                    exc_type, exc_value, exc_traceback = sys.exc_info()
                    TB = traceback.format_tb(exc_traceback)
                    print(TB, exc_type, exc_value)
                    User = None
        except:
            try:
                User = self.DefApi.users.get(user_ids=Id, v="5.60", fields=['sex'], name_case=case)[0]
                self.USERS.cacheUser(Id,User)
            except Exception as Ex:
                exc_type, exc_value, exc_traceback = sys.exc_info()
                TB = traceback.format_tb(exc_traceback)
                print(TB, exc_type, exc_value)
                User = None
        return User
Example #25
    def SendData(self, address, data):
        try:
            self.transport.write(data, address)
            return True
        except Exception:
            print("failed sending message to %s:" % str(address))
            # format_tb needs a traceback object, not the exception instance
            print(''.join(traceback.format_tb(sys.exc_info()[2])))
            return False
Example #26
File: vpv.py  Project: mpi2/vpv
def excepthook_overide(exctype, value, traceback_):
    """
    Used to override sys.excepthook so we can log any uncaught exceptions
    """
    logging.exception("""{} VPV encountered an uncaught error {}\n. Please email this log file to [email protected]
    {}\n{}\n{}""".format('#' * 10, '#' * 10, exctype, value,
                         ''.join(traceback.format_tb(traceback_))))
    print(exctype, value, ''.join(traceback.format_tb(traceback_)))
Example #27
File: reporter.py  Project: mcenirm/vbox
def logXcptWorker(iLevel, fIncErrors, sPrefix="", sText=None, cFrames=1):
    """
    Log an exception, optionally with a preceding message and more than one
    call frame.
    """
    g_oLock.acquire();
    if fIncErrors:
        g_oReporter.testIncErrors();

    ## @todo skip all this if iLevel is too high!

    # Try to get exception info.
    sTsPrf = utils.getTimePrefix();
    try:
        oType, oValue, oTraceback = sys.exc_info();
    except:
        oType = oValue = oTraceback = None;
    if oType is not None:

        # Try to format the info
        try:
            rc      = 0;
            sCaller = utils.getCallerName(oTraceback.tb_frame);
            if sText is not None:
                rc = g_oReporter.log(iLevel, "%s%s" % (sPrefix, sText), sCaller, sTsPrf);
            asInfo = [];
            try:
                asInfo = asInfo + traceback.format_exception_only(oType, oValue);
                if cFrames is not None and cFrames <= 1:
                    asInfo = asInfo + traceback.format_tb(oTraceback, 1);
                else:
                    asInfo.append('Traceback:')
                    asInfo = asInfo + traceback.format_tb(oTraceback, cFrames);
                    asInfo.append('Stack:')
                    asInfo = asInfo + traceback.format_stack(oTraceback.tb_frame.f_back, cFrames);
            except:
                g_oReporter.log(0, 'internal-error: Hit exception #2! %s' % (traceback.format_exc()), sCaller, sTsPrf);

            if len(asInfo) > 0:
                # Do the logging.
                for sItem in asInfo:
                    asLines = sItem.splitlines();
                    for sLine in asLines:
                        rc = g_oReporter.log(iLevel, '%s%s' % (sPrefix, sLine), sCaller, sTsPrf);

            else:
                g_oReporter.log(iLevel, 'No exception info...', sCaller, sTsPrf);
                rc = -3;
        except:
            g_oReporter.log(0, 'internal-error: Hit exception! %s' % (traceback.format_exc()), None, sTsPrf);
            rc = -2;
    else:
        g_oReporter.log(0, 'internal-error: No exception! %s'
                      % (utils.getCallerName(iFrame=3)), utils.getCallerName(iFrame=3), sTsPrf);
        rc = -1;

    g_oLock.release();
    return rc;
Example #28
 def run(self):
     self.connect()
     try:
         while True:
             log.debug("waiting for command...")
             data = self.recv_data()
             if data["cmd"] == self.QUIT:
                 log.debug("QUIT (%d)", self.QUIT)
                 break
             elif data["cmd"] == self.DEBUG:
                 log.debug("DEBUG")
                 self.toggle_debug()
             elif data["cmd"] == self.FILE:
                 log.debug("FILE (%d)", self.FILE)
                 self.recv_file(data)
             elif data["cmd"] == self.RUN:
                 log.debug("RUN (%d)", self.RUN)
                 log.debug("running cmd: %s", " ".join(data["cmd_to_run"]))
                 with tempfile.TemporaryFile() as fp:
                     try:
                         proc = subprocess.Popen(data["cmd_to_run"],
                                                 shell=False,
                                                 stdout=fp,
                                                 stderr=fp)
                         data = {"cmd":self.RESULT}
                         data["code"] = proc.wait()
                         fp.seek(0)
                         data["output"] = fp.read()
                         log.debug("command returned: %d", data["code"])
                     except Exception:
                         e = sys.exc_info()
                         log.debug("except - %s: %s", e[0].__name__, e[1])
                         data = {"cmd":self.EXCEPT,
                                 "msg":e[1],
                                 "name":e[0].__name__,
                                 "tb":"".join(traceback.format_tb(e[2]))}
                 log.debug("sending results...")
                 self.send_data(data)
             elif data["cmd"] == self.CODE:
                 log.debug("CODE (%d)", self.CODE)
                 try:
                     func = types.FunctionType(marshal.loads(data["code"]), globals(),
                                               data["name"], data["defaults"], data["closure"])
                     log.debug("%s() args:%s kwargs:%s", data["name"], data["args"], data["kwargs"])
                     data = {"cmd":self.RETURN, "value":func(*data["args"], **data["kwargs"])}
                 except Exception:
                     e = sys.exc_info()
                     log.debug("except - %s: %s", e[0].__name__, e[1])
                     data = {"cmd":self.EXCEPT,
                             "msg":e[1],
                             "name":e[0].__name__,
                             "tb":"".join(traceback.format_tb(e[2]))}
                 self.send_data(data)
             else:
                 log.debug("UNKNOWN (%s)", data)
                 raise RuntimeError("Unknown command: %d" % data["cmd"])
     finally:
         self.disconnect()
Example #29
def exceptionHandler(type_, value, trace):
    print("{} {} {}".format(type_, value, ''.join(traceback.format_tb(trace))))
    msg = QMessageBox()
    msg.setWindowTitle("Unhandled exception")
    msg.setIcon(QMessageBox.Critical)
    msg.setInformativeText("{} {}\nPlease report details".format(type_, value))
    msg.setDetailedText(("{} ".format(value)) + ''.join(traceback.format_tb(trace)))
    msg.raise_()
    msg.exec_()
Example #30
    def routing(self, request):
        """
        ルーティング
        """
        route = request.getvalue('Route', ['top'])
        session = request.getvalue('Session', None)
        setting = request.getvalue('Setting', {})
        post = request.getvalue('Post', MyStrage())

        # If there is no routing info, set the default
        if route[0] == '':
            route = ['top']
        if len(route)<2:
            route.append('index')
        elif route[1]=='':
            route[1] = 'index'

        page = route[0]
        method = route[1]
        param = []
        for i in range(len(route)-2):
            param.append(route[i+2])

        # Get the page class
        response_page = None
        try:
            # Dynamically import the page class
            page_class_name = page.capitalize()+'Page'
            page_class = getattr(__import__('page.'+page, {}, {}, page_class_name), page_class_name)
            response_page = page_class(request)
        except:
            import sys
            import traceback
            info = sys.exc_info()
            self.error_info.append([info, traceback.format_tb(info[2])[0]])
            response_page = None

        # If the page class cannot be loaded, treat the page as unimplemented
        if response_page is None:
            from page.page import Page
            response_page = Page(request)

        # Get the method from the page
        try:
            response_method = getattr(response_page, method)
            page_data = response_method(param)
        except:
            import sys
            import traceback
            info = sys.exc_info()
            self.error_info.append([info, traceback.format_tb(info[2])])
            response_method = getattr(response_page, 'error')
            page_data = response_method(param)

        self.title = response_page.get_title()

        return page_data
Example #31
    def upload(self):
        if self.params['ovf'] is None:
            self.module.fail_json(msg="OVF path is required for upload operation.")

        ovf_dir = os.path.dirname(self.params['ovf'])

        lease, import_spec = self.get_lease()

        uploaders = []

        for file_item in import_spec.fileItem:
            device_upload_url = None
            for device_url in lease.info.deviceUrl:
                if file_item.deviceId == device_url.importKey:
                    device_upload_url = self._normalize_url(device_url.url)
                    break

            if not device_upload_url:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='Failed to find deviceUrl for file %s' % file_item.path)
                )
                self.module.fail_json(
                    msg='Failed to find deviceUrl for file %s' % file_item.path
                )

            vmdk_tarinfo = None
            if self.tar:
                vmdk = self.tar
                try:
                    vmdk_tarinfo = self.tar.getmember(file_item.path)
                except KeyError:
                    lease.HttpNfcLeaseAbort(
                        vmodl.fault.SystemError(reason='Failed to find VMDK file %s in OVA' % file_item.path)
                    )
                    self.module.fail_json(
                        msg='Failed to find VMDK file %s in OVA' % file_item.path
                    )
            else:
                vmdk = os.path.join(ovf_dir, file_item.path)
                try:
                    path_exists(vmdk)
                except ValueError:
                    lease.HttpNfcLeaseAbort(
                        vmodl.fault.SystemError(reason='Failed to find VMDK file at %s' % vmdk)
                    )
                    self.module.fail_json(
                        msg='Failed to find VMDK file at %s' % vmdk
                    )

            uploaders.append(
                VMDKUploader(
                    vmdk,
                    device_upload_url,
                    self.params['validate_certs'],
                    tarinfo=vmdk_tarinfo,
                    create=file_item.create
                )
            )

        total_size = sum(u.size for u in uploaders)
        total_bytes_read = [0] * len(uploaders)
        for i, uploader in enumerate(uploaders):
            uploader.start()
            while uploader.is_alive():
                time.sleep(0.1)
                total_bytes_read[i] = uploader.bytes_read
                lease.HttpNfcLeaseProgress(int(100.0 * sum(total_bytes_read) / total_size))

            if uploader.e:
                lease.HttpNfcLeaseAbort(
                    vmodl.fault.SystemError(reason='%s' % to_native(uploader.e[1]))
                )
                self.module.fail_json(
                    msg='%s' % to_native(uploader.e[1]),
                    exception=''.join(traceback.format_tb(uploader.e[2]))
                )
Example #32
def formatTraceback(tb=None):
    if tb is None:
        extype, val, tb = sys.exc_info()
    tbs = "\n" + "".join(traceback.format_tb(tb))
    return tbs
Example #33
    def parse_source(self, source, cache=False):
        ''' Generate or update inventory for the source provided '''

        parsed = False
        display.debug(u'Examining possible inventory source: %s' % source)

        # use binary for path functions
        b_source = to_bytes(source)

        # process directories as a collection of inventories
        if os.path.isdir(b_source):
            display.debug(u'Searching for inventory files in directory: %s' %
                          source)
            for i in sorted(os.listdir(b_source)):

                display.debug(u'Considering %s' % i)
                # Skip hidden files and stuff we explicitly ignore
                if IGNORED.search(i):
                    continue

                # recursively deal with directory entries
                fullpath = to_text(os.path.join(b_source, i),
                                   errors='surrogate_or_strict')
                parsed_this_one = self.parse_source(fullpath, cache=cache)
                display.debug(u'parsed %s as %s' % (fullpath, parsed_this_one))
                if not parsed:
                    parsed = parsed_this_one
        else:
            # left with strings or files, let plugins figure it out

            # set so new hosts can use for inventory_file/dir vars
            self._inventory.current_source = source

            # try source with each plugin
            failures = []
            for plugin in self._fetch_inventory_plugins():

                plugin_name = to_text(
                    getattr(plugin, '_load_name',
                            getattr(plugin, '_original_path', '')))
                display.debug(u'Attempting to use plugin %s (%s)' %
                              (plugin_name, plugin._original_path))

                # initialize and figure out if plugin wants to attempt parsing this file
                try:
                    plugin_wants = bool(plugin.verify_file(source))
                except Exception:
                    plugin_wants = False

                if plugin_wants:
                    try:
                        # FIXME in case plugin fails 1/2 way we have partial inventory
                        plugin.parse(self._inventory,
                                     self._loader,
                                     source,
                                     cache=cache)
                        try:
                            plugin.update_cache_if_changed()
                        except AttributeError:
                            # some plugins might not implement caching
                            pass
                        parsed = True
                        display.vvv(
                            'Parsed %s inventory source with %s plugin' %
                            (source, plugin_name))
                        break
                    except AnsibleParserError as e:
                        display.debug('%s was not parsable by %s' %
                                      (source, plugin_name))
                        tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': e,
                            'tb': tb
                        })
                    except Exception as e:
                        display.debug(
                            '%s failed while attempting to parse %s' %
                            (plugin_name, source))
                        tb = ''.join(traceback.format_tb(sys.exc_info()[2]))
                        failures.append({
                            'src': source,
                            'plugin': plugin_name,
                            'exc': AnsibleError(e),
                            'tb': tb
                        })
                else:
                    display.vvv(
                        "%s declined parsing %s as it did not pass its verify_file() method"
                        % (plugin_name, source))
            else:
                if not parsed and failures:
                    # only if no plugin processed files should we show errors.
                    for fail in failures:
                        display.warning(
                            u'\n* Failed to parse %s with %s plugin: %s' %
                            (to_text(fail['src']), fail['plugin'],
                             to_text(fail['exc'])))
                        if 'tb' in fail:
                            display.vvv(to_text(fail['tb']))
                    if C.INVENTORY_ANY_UNPARSED_IS_FAILED:
                        raise AnsibleError(
                            u'Completely failed to parse inventory source %s' %
                            (source))
        if not parsed:
            if source != '/etc/ansible/hosts' or os.path.exists(source):
                # only warn if NOT using the default and if using it, only if the file is present
                display.warning("Unable to parse %s as an inventory source" %
                                source)

        # clear up, jic
        self._inventory.current_source = None

        return parsed
Example #34
        print(sOut)


try:
    if args.stdin:
        sb = []
        for line in sys.stdin:
            sb.append(line)

        pprint('\n'.join(sb))

    else:

        n = len(args.file)
        if n == 0:
            parser.print_help()
            print("One or more files required")
        for i, file in enumerate(args.file):
            sIn = file.read().replace('\n', '')
            file.close()
            pprint(sIn, n)

    sys.exit(0)

except Exception as e:
    exc_type, exc_value, exc_traceback = sys.exc_info()
    sTB = '\n'.join(traceback.format_tb(exc_traceback))
    print("Fatal exception: {}\n - msg: {}\n stack: {}".format(
        exc_type, exc_value, sTB))
    sys.exit(1)
Example #35
File: web.py  Project: bwica/dpusher
    try:
        result = job(job_id, input)

        if hasattr(result, "__call__"):
            db.mark_job_as_completed(job_id)
            return flask.Response(result(), mimetype='application/json')
        else:
            db.mark_job_as_completed(job_id, result)

    except util.JobError as e:
        db.mark_job_as_errored(job_id, e.as_dict())
    except Exception as e:
        db.mark_job_as_errored(
            job_id,
            traceback.format_tb(sys.exc_info()[2])[-1] + repr(e))

    api_key = db.get_job(job_id)['api_key']
    result_ok = send_result(job_id, api_key)

    if not result_ok:
        db.mark_job_as_failed_to_post_result(job_id)

    return job_status(job_id=job_id, show_job_key=True, ignore_auth=True)


def run_asynchronous_job(job, job_id, job_key, input):
    if not scheduler.running:
        scheduler.start()
    try:
        db.add_pending_job(job_id, job_key, **input)
Example #36
    def ext_pillar(self, pillar, errors=None):
        '''
        Render the external pillar data
        '''
        if errors is None:
            errors = []
        try:
            # Make sure that on-demand git_pillar is fetched before we try to
            # compile the pillar data. git_pillar will fetch a remote when
            # the git ext_pillar() func is run, but only for masterless.
            if self.ext and 'git' in self.ext \
                    and self.opts.get('__role') != 'minion':
                # Avoid circular import
                import salt.utils.gitfs
                import salt.pillar.git_pillar
                git_pillar = salt.utils.gitfs.GitPillar(
                    self.opts,
                    self.ext['git'],
                    per_remote_overrides=salt.pillar.git_pillar.
                    PER_REMOTE_OVERRIDES,
                    per_remote_only=salt.pillar.git_pillar.PER_REMOTE_ONLY)
                git_pillar.fetch_remotes()
        except TypeError:
            # Handle malformed ext_pillar
            pass
        if 'ext_pillar' not in self.opts:
            return pillar, errors
        if not isinstance(self.opts['ext_pillar'], list):
            errors.append('The "ext_pillar" option is malformed')
            log.critical(errors[-1])
            return pillar, errors
        ext = None
        # Bring in CLI pillar data
        if self.pillar_override:
            pillar = merge(pillar, self.pillar_override, self.merge_strategy,
                           self.opts.get('renderer', 'yaml'),
                           self.opts.get('pillar_merge_lists', False))

        for run in self.opts['ext_pillar']:
            if not isinstance(run, dict):
                errors.append('The "ext_pillar" option is malformed')
                log.critical(errors[-1])
                return {}, errors
            if next(six.iterkeys(run)) in self.opts.get(
                    'exclude_ext_pillar', []):
                continue
            for key, val in six.iteritems(run):
                if key not in self.ext_pillars:
                    log.critical('Specified ext_pillar interface {0} is '
                                 'unavailable'.format(key))
                    continue
                try:
                    ext = self._external_pillar_data(pillar, val, key)
                except Exception as exc:
                    errors.append('Failed to load ext_pillar {0}: {1}'.format(
                        key,
                        exc.__str__(),
                    ))
                    log.error(
                        'Exception caught loading ext_pillar \'%s\':\n%s', key,
                        ''.join(traceback.format_tb(sys.exc_info()[2])))
            if ext:
                pillar = merge(pillar, ext, self.merge_strategy,
                               self.opts.get('renderer', 'yaml'),
                               self.opts.get('pillar_merge_lists', False))
                ext = None
        return pillar, errors
Example #37
def tableToLineOfBearing(inputTable,
                         inputCoordinateFormat,
                         inputXField,
                         inputYField,
                         inputBearingUnits,
                         inputBearingField,
                         inputDistanceUnits,
                         inputDistanceField,
                         outputLineFeatures,
                         inputLineType,
                         inputSpatialReference):
    '''
    Tool method for converting a table of starting points, bearings, and distances
    to line features.
    
    inputTable - input table, each row will be a separate line feature in output
    inputCoordinateFormat - coordinate notation format of input vertices
    inputXField - field in inputTable for vertex x-coordinate, or full coordinate
    inputYField - field in inputTable for vertex y-coordinate, or None
    inputBearingUnits -
    inputBearingField -
    inputDistanceUnits -
    inputDistanceField -
    outputLineFeatures - polyline feature class to create
    inputLineType - 
    inputSpatialReference - spatial reference of input coordinates
    
    returns polyline feature class
    
    inputCoordinateFormat must be one of the following:
    * DD_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DD_2: Longitude and latitude values are in two separate fields.
    * DDM_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DDM_2: Longitude and latitude values are in two separate fields.
    * DMS_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DMS_2: Longitude and latitude values are in two separate fields.
    * GARS: Global Area Reference System. Based on latitude and longitude, it divides and subdivides the world into cells.
    * GEOREF: World Geographic Reference System. A grid-based system that divides the world into 15-degree quadrangles and then subdivides into smaller quadrangles.
    * UTM_ZONES: The letter N or S after the UTM zone number designates only North or South hemisphere.
    * UTM_BANDS: The letter after the UTM zone number designates one of the 20 latitude bands. N or S does not designate a hemisphere.
    * USNG: United States National Grid. Almost exactly the same as MGRS but uses North American Datum 1983 (NAD83) as its datum.
    * MGRS: Military Grid Reference System. Follows the UTM coordinates and divides the world into 6-degree longitude and 20 latitude bands, but MGRS then further subdivides the grid zones into smaller 100,000-meter grids. These 100,000-meter grids are then divided into 10,000-meter, 1,000-meter, 100-meter, 10-meter, and 1-meter grids.
    
    inputBearingUnits must be one of the following:
    * DEGREES
    * MILS
    * RADS
    * GRAD
    
    inputDistanceUnits must be one of the following:
    * METERS
    * KILOMETERS
    * MILES
    * NAUTICAL_MILES
    * FEET
    * US_SURVEY_FEET
    
    inputLineType must be one of the following:
    * GEODESIC:
    * GREAT_CIRCLE:
    * RHUMB_LINE:
    * NORMAL_SECTION:
    
    '''
    try:
        deleteme = []
        joinFieldName = "JoinID"
        scratch = '%scratchGDB%'
        if env.scratchWorkspace:
            scratch = env.scratchWorkspace
            
        inputSpatialReference = _checkSpatialRef(inputSpatialReference)
            
        copyRows = os.path.join(scratch, "copyRows")
        arcpy.CopyRows_management(inputTable, copyRows)
        originalTableFieldNames = _tableFieldNames(inputTable, joinExcludeFields)
        addUniqueRowID(copyRows, joinFieldName)
        
        arcpy.AddMessage("Formatting start point...")
        copyCCN = os.path.join(scratch, "copyCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows,
                                                   copyCCN,
                                                   inputXField,
                                                   inputYField,
                                                   inputCoordinateFormat,
                                                   "DD_NUMERIC",
                                                   joinFieldName,
                                                   inputSpatialReference)
        
        arcpy.AddMessage("Creating lines as {0}...".format(inputLineType))
        arcpy.BearingDistanceToLine_management(copyCCN,
                                               outputLineFeatures,
                                               "DDLon",
                                               "DDLat",
                                               inputDistanceField,
                                               inputDistanceUnits,
                                               inputBearingField,
                                               inputBearingUnits,
                                               inputLineType,
                                               joinFieldName,
                                               inputSpatialReference)
        
        #Join original table fields to output
        arcpy.AddMessage("Joining fields from input table to output line features...")
        arcpy.JoinField_management(outputLineFeatures, joinFieldName,
                                   copyRows, joinFieldName,
                                   originalTableFieldNames)
        arcpy.DeleteField_management(outputLineFeatures,
                                     [joinFieldName])
        
        return outputLineFeatures
    
    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
        
    finally:
        if len(deleteme) > 0:
            # cleanup intermediate datasets
            if debug == True: arcpy.AddMessage("Removing intermediate datasets...")
            for i in deleteme:
                if debug == True: arcpy.AddMessage("Removing: " + str(i))
                arcpy.Delete_management(i)
            if debug == True: arcpy.AddMessage("Done")
Example #38
0
        dropFieldFUI = [
            "Observed", "Cond", "LocalR2", "Predicted", "Intercept", "C1_pre1",
            "C2_pre2", "C3_pre3", "C4_pre4", "Residual", "StdError",
            "StdErr_Int", "StdErrC1_p", "StdErrC2_p", "StdErrC3_p",
            "StdErrC4_p", "StdResid"
        ]
        arcpy.DeleteField_management(GWR_reclassify, dropFieldFUI)

        del row
        del enumeration

        # Delete the intermediate shapefile
        arcpy.Delete_management(rasterPoint1)
        arcpy.Delete_management(rasterPoint2)
        arcpy.Delete_management(rasterPoint3)
        arcpy.Delete_management(rasterPoint4)
        arcpy.Delete_management(rasterPoint5)

    except Exception as e:
        # If unsuccessful, end gracefully by indicating why
        arcpy.AddError('\n' + "Script failed because: \t\t" + str(e))
        # ... and where
        exceptionreport = sys.exc_info()[2]
        fullermessage = traceback.format_tb(exceptionreport)[0]
        arcpy.AddError("at this location: \n\n" + fullermessage + "\n")

    # Check in Spatial Analyst extension license
    arcpy.CheckInExtension("spatial")
else:
    print "Spatial Analyst license is " + arcpy.CheckExtension("spatial")
Example #39
0
def format_exception(my_globals):
    """
  Add a dump of any variables we can identify from the failing program
  line to the end of the traceback.  The deep mojo for doing this came
  from an example in the Zope core plus documentation in the Python
  Quick Reference.
  """
    def get_meta_data(vstr, var):
        """
    Derive metadata of the variable, like len(), keys()
    """
        result = ''

        if isinstance(var, dict):
            result += 'keys(' + vstr + ') = ' + str(var.keys()) + '\n        '

        elif type(var) in (list, tuple):
            result += 'len(' + vstr + ') = ' + str(len(var)) + '   '

        result += vstr + ' = ' + str(var) + '\n'
        return result

    etype, value, tb = exc_info()
    plaintb = format_tb(tb)
    result = ['Traceback (innermost last):\n']
    for line in plaintb:
        result.append('*****' + line)
        f = tb.tb_frame
        tb = tb.tb_next
        locals = f.f_locals
        print('*****', line, '\n    &&&&')

        # remove left part(s) of an assignment
        # [-2] is the code line, containing the error
        line = line.split('\n')[-2]
        while '=' in line:
            line = line[line.find('=') + 1:]

        # parse the rest of the line
        vars = varsplitter.split(line)

        dvars = set()
        self = None
        if 'self' in locals:
            self = locals['self']

        # remove empties
        while '' in vars:
            vars.remove('')

        #result.append ( 'PIEP33' + str ( vars ) + '\n' )
        #print 'PIEP33' + str ( vars ) + '\n'
        for i, v in enumerate(vars):
            if v in dvars:
                continue
            dvars.add(v)

            #print 'HHHYYTT', v

            if self and hasattr(self, v):
                #print 'AAP1'
                result.append('      self:   %s: %r\n' % (v, getattr(self, v)))

            elif v in locals:
                line = '      local:  '
                var = locals[v]
                line += get_meta_data(v, var)
                result.append(line)

            elif v in globals():
                #print 'AAP3'
                result.append('      global: %s: %r\n' % (v, globals()[v]))
            elif v in my_globals():
                #print 'AAP4'
                result.append('      my_global: %s: %r\n' %
                              (v, my_globals()[v]))
            else:
                try:
                    if eval('isbuiltin ( "' + v + '")'):
                        pass  # print 'BUILTIN',v
                    elif eval('iskeyword ( "' + v + '")'):
                        pass  #print 'KeyWord',v
                except:
                    pass

        #print 'PIEP44'+ str ( vars ) + '\n'
        """
    line = line.replace ( ' ', '' )
    parsed = parse_list ( line )
    result.append ( '\n***** ' + line )

    for elem in parsed :
      # we need to display the length of lists
      if elem.find ( '[' ) > 0 :
        i = 0
        splitted = []
        while '[' in elem [ i : ] :
          i = elem.find ( '[', i )
          splitted.append ( elem [ : i ] )
          LB = 1
          ii = i + 1
          # find belonging right bracket
          while ( ii < len ( elem ) ) and ( LB > 0 ):
            if elem [ii] == '[' :
              LB += 1
            elif elem [ii] == ']' :
              LB -= 1
            ii += 1
          splitted.append ( elem [ i : ii ] )
          if elem [ ii : ] :
            splitted.append ( elem [ ii : ] )
          i += ii + 1
        i = 1
        for item in splitted [ 2 : ] :
          if '[' in item :
            i += 1
        for ii in range ( i ) :
          item = ''.join ( splitted [ : ii+1 ] )
          result.append ( 'len(' + item + ')='+ str ( eval ( 'len(' + item + ')' ))+ ',  ', )

      if elem in locals:
        result.append ( elem + '=' + str ( eval ( elem ) ), globals, locals )
    """
    result.extend(format_exception_only(etype, value))
    return ''.join(result)
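A hedged usage sketch for format_exception: it calls exc_info() itself, so it must run inside an except block, and it takes a callable that returns the caller's globals (it invokes my_globals() internally). varsplitter is a module-level dependency not shown above; the compiled pattern below is an assumption about its shape.

import re
from sys import exc_info
from traceback import format_tb, format_exception_only

# Assumed tokenizer: split a source line into identifier-like tokens.
varsplitter = re.compile(r'[^\w.]+')

def risky(mapping):
    return mapping['missing']  # raises KeyError

try:
    risky({'present': 1})
except Exception:
    # Pass the callable `globals`, not `globals()`.
    print(format_exception(globals))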
Example #40
0
    def _create_notification_list_db(self, jsonData):
        ret_dic = {}

        # Get DB from here and pass it to _check_retry_notification
        try:
            # Get session for db
            db_engine = dbapi.get_engine()
            session = dbapi.get_session(db_engine)
            if self._check_retry_notification(jsonData, session):
                self.rc_util.syslogout_ex("RecoveryController_0030",
                                          syslog.LOG_INFO)
                msg = "Duplicate notifications. id:" + jsonData.get("id")
                self.rc_util.syslogout(msg, syslog.LOG_INFO)
                self.rc_util.syslogout(jsonData, syslog.LOG_INFO)

            # Node Recovery(processing A)
            elif jsonData.get("type") == "rscGroup" and \
                    str(jsonData.get("eventID")) == "1" and \
                    str(jsonData.get("eventType")) == "2" and \
                    str(jsonData.get("detail")) == "2":

                tdatetime = datetime.datetime.strptime(jsonData.get("time"),
                                                       '%Y%m%d%H%M%S')
                if not self._check_repeated_notify(
                        tdatetime, jsonData.get("hostname"), session):
                    recover_by = 0  # node recovery
                    ret_dic = self.rc_util_db.insert_notification_list_db(
                        jsonData, recover_by, session)
                    self.rc_util.syslogout_ex("RecoveryController_0014",
                                              syslog.LOG_INFO)
                    self.rc_util.syslogout(jsonData, syslog.LOG_INFO)
                else:
                    # Duplicate notifications.
                    self.rc_util.syslogout_ex("RecoveryController_0015",
                                              syslog.LOG_INFO)
                    msg = "Duplicate notifications. id:" + jsonData.get("id")
                    self.rc_util.syslogout(msg, syslog.LOG_INFO)
                    self.rc_util.syslogout(jsonData, syslog.LOG_INFO)

            # VM Recovery(processing G)
            elif jsonData.get("type") == 'VM' and \
                    str(jsonData.get("eventID")) == '0' and \
                    str(jsonData.get("eventType")) == '5' and \
                    str(jsonData.get("detail")) == '5':

                recover_by = 1  # VM recovery
                ret_dic = self.rc_util_db.insert_notification_list_db(
                    jsonData, recover_by, session)
                self.rc_util.syslogout_ex("RecoveryController_0019",
                                          syslog.LOG_INFO)
                self.rc_util.syslogout(jsonData, syslog.LOG_INFO)

            # Node Lock(processing D and F)
            # Node will be locked.
            elif (jsonData.get("type") == 'nodeStatus') or \
                 ((jsonData.get("type") == 'rscGroup' and
                   str(jsonData.get("eventID")) == '1' and
                   str(jsonData.get("eventType")) == '2') and
                  (str(jsonData.get("detail")) == '3' or
                   str(jsonData.get("detail")) == '4')):

                tdatetime = datetime.datetime.strptime(jsonData.get("time"),
                                                       '%Y%m%d%H%M%S')
                if not self._check_repeated_notify(
                        tdatetime, jsonData.get("hostname"), session):

                    recover_by = 2  # NODE lock
                    ret_dic = self.rc_util_db.insert_notification_list_db(
                        jsonData, recover_by, session)
                    self.rc_util.syslogout_ex("RecoveryController_0021",
                                              syslog.LOG_INFO)
                    self.rc_util.syslogout(jsonData, syslog.LOG_INFO)
                else:
                    # Duplicate notifications.
                    self.rc_util.syslogout_ex("RecoveryController_0036",
                                              syslog.LOG_INFO)
                    msg = "Duplicate notifications. id:" + jsonData.get("id")
                    self.rc_util.syslogout(msg, syslog.LOG_INFO)
                    self.rc_util.syslogout(jsonData, syslog.LOG_INFO)

            # Do not recover (Executed Stop API)
            elif jsonData.get("type") == "VM" and \
                    str(jsonData.get("eventID")) == "0" and \
                    str(jsonData.get("eventType")) == "5" and \
                    str(jsonData.get("detail")) == "1":
                self.rc_util.syslogout_ex("RecoveryController_0022",
                                          syslog.LOG_INFO)
                self.rc_util.syslogout(jsonData, syslog.LOG_INFO)
                msg = "Do not recover instance.(Excuted Stop API)"
                self.rc_util.syslogout(msg, syslog.LOG_INFO)

            # Notification of starting node.
            elif jsonData.get("type") == "rscGroup" and \
                    str(jsonData.get("eventID")) == "1" and \
                    str(jsonData.get("eventType")) == "1" and \
                    str(jsonData.get("detail")) == "1":
                self.rc_util.syslogout_ex("RecoveryController_0023",
                                          syslog.LOG_INFO)
                self.rc_util.syslogout(jsonData, syslog.LOG_INFO)
                msg = "Recieved notification of node starting. Node:" + \
                      jsonData['hostname']
                self.rc_util.syslogout(msg, syslog.LOG_INFO)

            # Ignore notification
            else:
                self.rc_util.syslogout_ex("RecoveryController_0024",
                                          syslog.LOG_INFO)
                self.rc_util.syslogout(jsonData, syslog.LOG_INFO)
                msg = "Ignore notification. Notification:" + str(jsonData)
                self.rc_util.syslogout(msg, syslog.LOG_INFO)
        except Exception:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0046",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            raise
        return ret_dic
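The elif chain above dispatches on the (type, eventID, eventType, detail) fields of the notification payload. A condensed sketch of that dispatch as a lookup table (branch values and recover_by codes taken from the method above):

# (type, eventID, eventType, detail) -> recover_by
# 0 = node recovery (processing A), 1 = VM recovery (processing G),
# 2 = node lock (processing D and F)
DISPATCH = {
    ('rscGroup', '1', '2', '2'): 0,
    ('VM',       '0', '5', '5'): 1,
    ('rscGroup', '1', '2', '3'): 2,
    ('rscGroup', '1', '2', '4'): 2,
}

def classify(jsonData):
    if jsonData.get("type") == 'nodeStatus':
        return 2  # any nodeStatus notification locks the node
    key = (jsonData.get("type"),
           str(jsonData.get("eventID")),
           str(jsonData.get("eventType")),
           str(jsonData.get("detail")))
    return DISPATCH.get(key)  # None -> stop/start/ignore branches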
Example #41
0
    def _notification_reciever(self, env, start_response):
        try:
            # CONTENT_LENGTH arrives as a string; coerce before comparing/reading
            content_length = int(env.get('CONTENT_LENGTH') or 0)
            if content_length > 0:
                body = env['wsgi.input'].read(content_length)
                json_data = json.loads(body)

                self.rc_util.syslogout_ex("RecoveryController_0009",
                                          syslog.LOG_INFO)
                msg = "Recieved notification : " + body
                self.rc_util.syslogout(msg, syslog.LOG_INFO)

                ret = self._check_json_param(json_data)
                if ret == 1:
                    # Return Response
                    start_response('400 Bad Request',
                                   [('Content-Type', 'text/plain')])
                    return ['method _notification_reciever returned.\r\n']

                # Insert notification into notification_list_db
                notification_list_dic = {}
                notification_list_dic = self._create_notification_list_db(
                    json_data)

                # Return Response
                start_response('200 OK', [('Content-Type', 'text/plain')])

                if notification_list_dic != {}:
                    # Start thread
                    if notification_list_dic.get("recover_by") == 0 and \
                       notification_list_dic.get("progress") == 0:

                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                False,
                            ))
                        th.start()

                        # Sleep until nova recognizes the node down.
                        self.rc_util.syslogout_ex("RecoveryController_0029",
                                                  syslog.LOG_INFO)
                        dic = self.rc_config.get_value('recover_starter')
                        node_err_wait = dic.get("node_err_wait")
                        msg = (
                            "Sleeping %s sec before starting recovery"
                            "thread until nova recognizes the node down..." %
                            (node_err_wait))
                        self.rc_util.syslogout(msg, syslog.LOG_INFO)
                        greenthread.sleep(int(node_err_wait))

                        retry_mode = False
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_host,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                notification_list_dic.get(
                                    "notification_cluster_port"),
                                retry_mode,
                            ))

                        th.start()
                    elif notification_list_dic.get("recover_by") == 0 and \
                            notification_list_dic.get("progress") == 3:
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                False,
                            ))
                        th.start()
                    elif notification_list_dic.get("recover_by") == 1:
                        retry_mode = False
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_instance,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get("notification_uuid"),
                                retry_mode,
                            ))
                        th.start()
                    elif notification_list_dic.get("recover_by") == 2:
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            args=(
                                notification_list_dic.get("notification_id"),
                                notification_list_dic.get(
                                    "notification_hostname"),
                                True,
                            ))
                        th.start()
                    else:
                        self.rc_util.syslogout_ex("RecoveryController_0010",
                                                  syslog.LOG_INFO)
                        self.rc_util.syslogout(
                            'Column "recover_by" on notification_list DB '
                            'has an invalid value.',
                            syslog.LOG_INFO)

        except exc.SQLAlchemyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0011",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            start_response('500 Internal Server Error',
                           [('Content-Type', 'text/plain')])
        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0012",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            start_response('500 Internal Server Error',
                           [('Content-Type', 'text/plain')])
        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0013",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            start_response('500 Internal Server Error',
                           [('Content-Type', 'text/plain')])

        return ['method _notification_reciever returned.\r\n']
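A WSGI handler like _notification_reciever can be exercised with a hand-built environ; a minimal sketch (the commented call is illustrative only, since the real method also needs the controller's DB and worker helpers):

import io
import json

payload = json.dumps({"type": "VM", "eventID": 0, "eventType": 5,
                      "detail": 5, "id": "42",
                      "time": "20240101000000"}).encode('utf-8')

environ = {
    'CONTENT_LENGTH': str(len(payload)),
    'wsgi.input': io.BytesIO(payload),
}

def start_response(status, headers):
    print('HTTP status:', status)

# controller._notification_reciever(environ, start_response)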
Example #42
0
    def masakari(self):
        """
        RecoveryController class main processing:
        This processing checks the VM list table of DB.
        If an unprocessed VM exists, and start thread to execute the recovery
        process.
        Then, the processing starts the wsgi server and waits for the
        notification.
        """

        try:
            self.rc_util.syslogout_ex("RecoveryController_0004",
                                      syslog.LOG_INFO)
            self.rc_util.syslogout("masakari START.", syslog.LOG_INFO)

            # Get a session and do not pass it to other threads
            db_engine = dbapi.get_engine()
            session = dbapi.get_session(db_engine)

            self._update_old_records_notification_list(session)
            result = self._find_reprocessing_records_notification_list(session)
            preprocessing_count = len(result)

            if preprocessing_count > 0:
                for row in result:
                    if row.recover_by == 0:
                        # node recovery event
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            args=(
                                row.notification_id,
                                row.notification_hostname,
                                False,
                            ))
                        th.start()

                        # Sleep until updating nova-compute service status
                        # down.
                        self.rc_util.syslogout_ex("RecoveryController_0035",
                                                  syslog.LOG_INFO)
                        dic = self.rc_config.get_value('recover_starter')
                        node_err_wait = dic.get("node_err_wait")
                        msg = ("Sleeping %s sec before starting node recovery"
                               "thread, until updateing nova-compute"
                               "service status." % (node_err_wait))
                        self.rc_util.syslogout(msg, syslog.LOG_INFO)
                        greenthread.sleep(int(node_err_wait))

                        # Start add_failed_host thread
                        # TODO(sampath):
                        # Avoid create thread here,
                        # instead call rc_starter.add_failed_host
                        retry_mode = True
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_host,
                            args=(
                                row.notification_id,
                                row.notification_hostname,
                                row.notification_cluster_port,
                                retry_mode,
                            ))
                        th.start()

                    elif row.recover_by == 1:
                        # instance recovery event
                        # TODO(sampath):
                        # Avoid create thread here,
                        # instead call rc_starter.add_failed_instance
                        th = threading.Thread(
                            target=self.rc_starter.add_failed_instance,
                            args=(
                                row.notification_id,
                                row.notification_uuid,
                            ))
                        th.start()

                    else:
                        # maintenance mode event
                        th = threading.Thread(
                            target=self.rc_worker.host_maintenance_mode,
                            args=(
                                row.notification_id,
                                row.notification_hostname,
                                True,
                            ))
                        th.start()

            # Start handle_pending_instances thread
            # TODO(sampath):
            # Avoid create thread here,
            # instead call rc_starter.handle_pending_instances()
            th = threading.Thread(
                target=self.rc_starter.handle_pending_instances)
            th.start()

            # Start receiver process for notification
            conf_wsgi_dic = self.rc_config.get_value('wsgi')
            wsgi.server(
                eventlet.listen(('', int(conf_wsgi_dic['server_port']))),
                self._notification_reciever)
        except exc.SQLAlchemyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0005",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            sys.exit()
        except KeyError:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0006",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            sys.exit()
        except:
            error_type, error_value, traceback_ = sys.exc_info()
            tb_list = traceback.format_tb(traceback_)
            self.rc_util.syslogout_ex("RecoveryController_0007",
                                      syslog.LOG_ERR)
            self.rc_util.syslogout(error_type, syslog.LOG_ERR)
            self.rc_util.syslogout(error_value, syslog.LOG_ERR)
            for tb in tb_list:
                self.rc_util.syslogout(tb, syslog.LOG_ERR)
            sys.exit()
Example #43
0
def tableToPolyline(inputTable,
                    inputCoordinateFormat,
                    inputXField,
                    inputYField,
                    outputPolylineFeatures,
                    inputLineField,
                    inputSortField,
                    inputSpatialReference):
    '''
    Converts a table of vertices to one or more polyline features.
    
    inputTable - input table, each row is a vertex
    inputCoordinateFormat - coordinate notation format of input vertices
    inputXField - field in inputTable for vertex x-coordinate, or full coordinate
    inputYField - field in inputTable for vertex y-coordinate, or None
    outputPolylineFeatures - polyline feature class to create
    inputLineField - field in inputTable to identify separate polylines
    inputSortField - field in inputTable to sort vertices
    inputSpatialReference - spatial reference of input coordinates
    
    returns polyline feature class
    
    inputCoordinateFormat must be one of the following:
    * DD_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DD_2: Longitude and latitude values are in two separate fields.
    * DDM_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DDM_2: Longitude and latitude values are in two separate fields.
    * DMS_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DMS_2: Longitude and latitude values are in two separate fields.
    * GARS: Global Area Reference System. Based on latitude and longitude, it divides and subdivides the world into cells.
    * GEOREF: World Geographic Reference System. A grid-based system that divides the world into 15-degree quadrangles and then subdivides into smaller quadrangles.
    * UTM_ZONES: The letter N or S after the UTM zone number designates only the North or South hemisphere.
    * UTM_BANDS: The letter after the UTM zone number designates one of the 20 latitude bands. N or S does not designate a hemisphere.
    * USNG: United States National Grid. Almost exactly the same as MGRS but uses North American Datum 1983 (NAD83) as its datum.
    * MGRS: Military Grid Reference System. Follows the UTM coordinates and divides the world into 6-degree longitude zones and 20 latitude bands, but MGRS then further subdivides the grid zones into smaller 100,000-meter grids. These 100,000-meter grids are then divided into 10,000-meter, 1,000-meter, 100-meter, 10-meter, and 1-meter grids.
     
    '''
    try:
        deleteme = []
        joinFieldName = "JoinID"
        scratch = '%scratchGDB%'
        if env.scratchWorkspace:
            scratch = env.scratchWorkspace
            
        inputSpatialReference = _checkSpatialRef(inputSpatialReference)
        
        copyRows = os.path.join(scratch, "copyRows")
        arcpy.CopyRows_management(inputTable, copyRows)
        addUniqueRowID(copyRows, joinFieldName)
        
        copyCCN = os.path.join(scratch, "copyCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows,
                                                   copyCCN,
                                                   inputXField,
                                                   inputYField,
                                                   inputCoordinateFormat,
                                                   "DD_NUMERIC",
                                                   joinFieldName,
                                                   inputSpatialReference)
        
        arcpy.PointsToLine_management(copyCCN,
                                      outputPolylineFeatures,
                                      inputLineField,
                                      inputSortField,
                                      "CLOSE")
        
        return outputPolylineFeatures
    
    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
        
    finally:
        if len(deleteme) > 0:
            # cleanup intermediate datasets
            if debug == True: arcpy.AddMessage("Removing intermediate datasets...")
            for i in deleteme:
                if debug == True: arcpy.AddMessage("Removing: " + str(i))
                arcpy.Delete_management(i)
            if debug == True: arcpy.AddMessage("Done")
Example #44
0
        if fldr.endswith('.gdb'):
            outfile = fldr + ".zip"
            try:
                zip = zipfile.ZipFile(os.path.join(wkPath, outfile), 'w',
                                      zipfile.ZIP_DEFLATED)
                zipws(os.path.join(wkPath, fldr), zip, False)
                zip.close()
                print("  >> {} zipped successfully".format(outfile))

            except RuntimeError:
                # Delete zip file if it exists
                if os.path.exists(os.path.join(wkPath, outfile)):
                    os.unlink(os.path.join(wkPath, outfile))
                zip = zipfile.ZipFile(os.path.join(wkPath, outfile), 'w',
                                      zipfile.ZIP_STORED)
                zipws(os.path.join(wkPath, fldr), zip, False)
                zip.close()
                print(
                    "  >> {} zipped, however unable to compress zip file contents."
                    .format(outfile))

except:

    # Return any Python specific errors
    tb = sys.exc_info()[2]
    tbinfo = traceback.format_tb(tb)[0]
    print("PYTHON ERRORS:\nTraceback Info:\n{}\nError Info:\n    {}: {}\n".
          format(tbinfo, sys.exc_type, sys.exc_value))

raw_input("Press Enter To Close")
Example #45
0
def polylineToPolygon(inputPolylines, inputIDFieldName, outputPolygons):
    '''
    Converts polyline to polygon features. All closed features will
    be converted to polygons. Unclosed polylines, or polylines with
    fewer than 2 vertices, will not convert.
    
    inputPolylines - input polyline feature class
    inputIDFieldName - field in inputPolylines to separate individual features
    outputPolygons - polygon feature class to be created
    
    returns polygon feature class
    '''
    try:
        env.overwriteOutput = True
        #Create output Poly FC
        sr = arcpy.Describe(inputPolylines).spatialReference
        # if debug:
        #     arcpy.AddMessage("Spatial reference is " + str(sr.name))
        #     arcpy.AddMessage("Creating output feature class...")
        outpolygonsFC = arcpy.CreateFeatureclass_management(os.path.dirname(outputPolygons),
                                                            os.path.basename(outputPolygons),
                                                            "POLYGON",
                                                            "#",
                                                            "#",
                                                            "#",
                                                            sr)
        
        inFields = ["SHAPE@"]
        if inputIDFieldName:
            #Add ID field
            if debug:
                arcpy.AddMessage("Adding ID field: %s ..." % str(inputIDFieldName))
            arcpy.AddField_management(outpolygonsFC,inputIDFieldName, "LONG")
            inFields = ["SHAPE@", inputIDFieldName]

        if debug:
            arcpy.AddMessage("Converting Polylines to Polygons...")

        #Open Search cursor on polyline
        inRows = arcpy.da.SearchCursor(inputPolylines, inFields)
    
        #Open Insert cursor on polygons
        outRows = arcpy.da.InsertCursor(outpolygonsFC, inFields)
    
        polyArray = arcpy.Array()

        rowCount = 0

        for row in inRows:

            rowCount += 1

            # Provide feedback, since this method may take a while for large datasets
            if debug and not (rowCount % 100):
                arcpy.AddMessage('Processing Row: ' + str(rowCount))

            if inputIDFieldName:
                inID = row[1]

            # Polyline will only have one part
            featShape = row[0]
            polyline = featShape.getPart(0)

            polyArray.removeAll()
            polyArray.append(polyline)

            outPoly = arcpy.Polygon(polyArray, sr)

            if inputIDFieldName:
                outRows.insertRow([outPoly, inID])
            else:
                outRows.insertRow([outPoly])

        if debug:
            arcpy.AddMessage("Done converting polylines to polygons ...")
                
        #close cursors
        del outRows
        del inRows
        return outputPolygons
    
    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)
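A hedged usage sketch for polylineToPolygon (hypothetical paths; pass None as the ID field name to convert geometry only):

polys = polylineToPolygon(r"C:\data\aoi.gdb\closedBoundaries",
                          "BoundaryID",   # copied onto each output polygon
                          r"C:\data\aoi.gdb\aoiPolygons")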
Example #46
0
File: utils.py  Project: lu-w/vdirsyncer
def handle_cli_error(status_name=None, e=None):
    '''
    Print a useful error message for the current exception.

    This is supposed to catch all exceptions, and should never raise any
    exceptions itself.
    '''

    try:
        if e is not None:
            raise e
        else:
            raise
    except exceptions.UserError as e:
        cli_logger.critical(e)
    except StorageEmpty as e:
        cli_logger.error(
            '{status_name}: Storage "{name}" was completely emptied. If you '
            'want to delete ALL entries on BOTH sides, then use '
            '`vdirsyncer sync --force-delete {status_name}`. '
            'Otherwise delete the files for {status_name} in your status '
            'directory.'.format(name=e.empty_storage.instance_name,
                                status_name=status_name))
    except PartialSync as e:
        cli_logger.error(
            '{status_name}: Attempted change on {storage}, which is read-only'
            '. Set `partial_sync` in your pair section to `ignore` to ignore '
            'those changes, or `revert` to revert them on the other side.'.
            format(status_name=status_name, storage=e.storage))
    except SyncConflict as e:
        cli_logger.error(
            '{status_name}: One item changed on both sides. Resolve this '
            'conflict manually, or by setting the `conflict_resolution` '
            'parameter in your config file.\n'
            'See also {docs}/config.html#pair-section\n'
            'Item ID: {e.ident}\n'
            'Item href on side A: {e.href_a}\n'
            'Item href on side B: {e.href_b}\n'.format(status_name=status_name,
                                                       e=e,
                                                       docs=DOCS_HOME))
    except IdentConflict as e:
        cli_logger.error(
            '{status_name}: Storage "{storage.instance_name}" contains '
            'multiple items with the same UID or even content. Vdirsyncer '
            'will now abort the synchronization of this collection, because '
            'the fix for this is not clear; it could be the result of a badly '
            'behaving server. You can try running:\n\n'
            '    vdirsyncer repair {storage.instance_name}\n\n'
            'But make sure to have a backup of your data in some form. The '
            'offending hrefs are:\n\n{href_list}\n'.format(
                status_name=status_name,
                storage=e.storage,
                href_list='\n'.join(map(repr, e.hrefs))))
    except (click.Abort, KeyboardInterrupt, JobFailed):
        pass
    except exceptions.PairNotFound as e:
        cli_logger.error(
            'Pair {pair_name} does not exist. Please check your '
            'configuration file and make sure you\'ve typed the pair name '
            'correctly'.format(pair_name=e.pair_name))
    except exceptions.InvalidResponse as e:
        cli_logger.error(
            'The server returned something vdirsyncer doesn\'t understand. '
            'Error message: {!r}\n'
            'While this is most likely a serverside problem, the vdirsyncer '
            'devs are generally interested in such bugs. Please report it in '
            'the issue tracker at {}'.format(e, BUGTRACKER_HOME))
    except exceptions.CollectionRequired:
        cli_logger.error(
            'One or more storages don\'t support `collections = null`. '
            'You probably want to set `collections = ["from a", "from b"]`.')
    except Exception as e:
        tb = sys.exc_info()[2]
        import traceback
        tb = traceback.format_tb(tb)
        if status_name:
            msg = f'Unknown error occurred for {status_name}'
        else:
            msg = 'Unknown error occurred'

        msg += f': {e}\nUse `-vdebug` to see the full traceback.'

        cli_logger.error(msg)
        cli_logger.debug(''.join(tb))
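A usage sketch for handle_cli_error: it re-raises the current exception (or the one passed in) to dispatch on its type, so it is meant to be called from an except block. run_sync and the status name are hypothetical.

try:
    run_sync(pair)  # hypothetical sync invocation
except Exception:
    handle_cli_error(status_name='mypair/contacts')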
Example #47
0
def tableTo2PointLine(inputTable,
                        inputStartCoordinateFormat,
                        inputStartXField,
                        inputStartYField,
                        inputEndCoordinateFormat,
                        inputEndXField,
                        inputEndYField,
                        outputLineFeatures,
                        inputLineType,
                        inputSpatialReference):
    '''
    Creates line features from a start point coordinate and an endpoint coordinate.

    inputTable - Input Table
    inputStartCoordinateFormat - Start Point Format (from Value List)
    inputStartXField - Start X Field (longitude, UTM, MGRS, USNG, GARS, GEOREF)(from Input Table)
    inputStartYField - Start Y Field (latitude) (from Input Table)
    inputEndCoordinateFormat - End Point Format (from Value List)
    inputEndXField - End X Field (longitude, UTM, MGRS, USNG, GARS, GEOREF)(from Input Table)
    inputEndYField - End Y Field (latitude) (from Input Table)
    outputLineFeatures - Output Line
    inputLineType - Line Type (from Value List)
    inputSpatialReference - Spatial Reference, default is GCS_WGS_1984

    returns line feature class

    inputStartCoordinateFormat and inputEndCoordinateFormat must be one of the following:
    * DD_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DD_2: Longitude and latitude values are in two separate fields.
    * DDM_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DDM_2: Longitude and latitude values are in two separate fields.
    * DMS_1: Both longitude and latitude values are in a single field. Two values are separated by a space, a comma, or a slash.
    * DMS_2: Longitude and latitude values are in two separate fields.
    * GARS: Global Area Reference System. Based on latitude and longitude, it divides and subdivides the world into cells.
    * GEOREF: World Geographic Reference System. A grid-based system that divides the world into 15-degree quadrangles and then subdivides into smaller quadrangles.
    * UTM_ZONES: The letter N or S after the UTM zone number designates only the North or South hemisphere.
    * UTM_BANDS: The letter after the UTM zone number designates one of the 20 latitude bands. N or S does not designate a hemisphere.
    * USNG: United States National Grid. Almost exactly the same as MGRS but uses North American Datum 1983 (NAD83) as its datum.
    * MGRS: Military Grid Reference System. Follows the UTM coordinates and divides the world into 6-degree longitude zones and 20 latitude bands, but MGRS then further subdivides the grid zones into smaller 100,000-meter grids. These 100,000-meter grids are then divided into 10,000-meter, 1,000-meter, 100-meter, 10-meter, and 1-meter grids.
    
    inputLineType must be one of the following:
    * GEODESIC: a geodetic line that most accurately represents the shortest distance between the two points on the spheroid.
    * GREAT_CIRCLE: a geodetic line along the intersection of the spheroid and a plane passing through the two points and the center of the spheroid.
    * RHUMB_LINE: a loxodrome; a geodetic line of constant azimuth between the two points.
    * NORMAL_SECTION: a geodetic line defined by the intersection of the spheroid and a plane passing through the two points, normal to the spheroid surface at the start point.

    '''
    try:
        # get/set environment
        env.overwriteOutput = True
        
        deleteme = []
        scratch = '%scratchGDB%'
        
        joinFieldName = "JoinID"
        startXFieldName = "startX"
        startYFieldName = "startY"
        endXFieldName = "endX"
        endYFieldName = "endY"
        
        if env.scratchWorkspace:
            scratch = env.scratchWorkspace
        
        inputSpatialReference = _checkSpatialRef(inputSpatialReference)
            
        copyRows = os.path.join(scratch, "copyRows")
        arcpy.CopyRows_management(inputTable, copyRows)
        originalTableFieldNames = _tableFieldNames(inputTable, joinExcludeFields)
        addUniqueRowID(copyRows, joinFieldName)
        
        #Convert Start Point
        arcpy.AddMessage("Formatting start point...")
        startCCN = os.path.join(scratch, "startCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows,
                                                   startCCN,
                                                   inputStartXField,
                                                   inputStartYField,
                                                   inputStartCoordinateFormat,
                                                   "DD_NUMERIC",
                                                   joinFieldName)
        arcpy.AddField_management(startCCN, startXFieldName, "DOUBLE")
        arcpy.CalculateField_management(startCCN, startXFieldName, "!DDLon!","PYTHON_9.3")
        arcpy.AddField_management(startCCN, startYFieldName, "DOUBLE")
        arcpy.CalculateField_management(startCCN, startYFieldName, "!DDLat!","PYTHON_9.3")
        arcpy.JoinField_management(copyRows, joinFieldName,
                                   startCCN, joinFieldName,
                                   [startXFieldName, startYFieldName]) 

        #Convert End Point
        arcpy.AddMessage("Formatting end point...")
        endCCN = os.path.join(scratch, "endCCN")
        arcpy.ConvertCoordinateNotation_management(copyRows,
                                                   endCCN,
                                                   inputEndXField,
                                                   inputEndYField,
                                                   inputEndCoordinateFormat,
                                                   "DD_NUMERIC",
                                                   joinFieldName)
        arcpy.AddField_management(endCCN, endXFieldName, "DOUBLE")
        arcpy.CalculateField_management(endCCN, endXFieldName, "!DDLon!","PYTHON_9.3")
        arcpy.AddField_management(endCCN, endYFieldName, "DOUBLE")
        arcpy.CalculateField_management(endCCN, endYFieldName, "!DDLat!","PYTHON_9.3")
        arcpy.JoinField_management(copyRows, joinFieldName,
                                   endCCN, joinFieldName,
                                   [endXFieldName, endYFieldName])

        #XY TO LINE
        arcpy.AddMessage("Connecting start point to end point as {0}...".format(inputLineType))
        arcpy.XYToLine_management(copyRows,
                                  outputLineFeatures,
                                  startXFieldName, startYFieldName,
                                  endXFieldName, endYFieldName,
                                  inputLineType,
                                  joinFieldName,
                                  inputSpatialReference)
        
        #Join original table fields to output
        arcpy.AddMessage("Joining fields from input table to output line features...")
        arcpy.JoinField_management(outputLineFeatures, joinFieldName,
                                   copyRows, joinFieldName,
                                   originalTableFieldNames)
        
        arcpy.DeleteField_management(outputLineFeatures, [joinFieldName,
                                                startXFieldName, startYFieldName,
                                                endXFieldName, endYFieldName])

        return outputLineFeatures

    except arcpy.ExecuteError: 
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)
        print(msgs)

    except:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # Print Python error messages for use in Python / Python Window
        print(pymsg + "\n")
        print(msgs)

    finally:
        if len(deleteme) > 0:
            # cleanup intermediate datasets
            if debug == True: arcpy.AddMessage("Removing intermediate datasets...")
            for i in deleteme:
                if debug == True: arcpy.AddMessage("Removing: " + str(i))
                arcpy.Delete_management(i)
            if debug == True: arcpy.AddMessage("Done")
Example #48
0
def RunTest():
    try:
        arcpy.AddMessage("Starting Test: HighestPoint")

        if arcpy.CheckExtension("Spatial") == "Available":
            arcpy.CheckOutExtension("Spatial")
        else:
            # Raise a custom exception
            raise Exception("LicenseError")

        # WORKAROUND
        print "Creating New Scratch Workspace (Workaround)"
        TestUtilities.createScratch()

        # Verify the expected configuration exists
        inputPolygonFC = os.path.join(TestUtilities.inputGDB,
                                      "samplePolygonArea")
        inputSurface = os.path.join(TestUtilities.defaultGDB,
                                    "Jbad_SRTM_USGS_EROS")
        outputPointsFC = os.path.join(TestUtilities.outputGDB, "HighestPoint")
        toolbox = TestUtilities.toolbox

        # Check For Valid Input
        objects2Check = []
        objects2Check.extend([inputPolygonFC, inputSurface, toolbox])
        for object2Check in objects2Check:
            desc = arcpy.Describe(object2Check)
            if desc is None:
                raise Exception("Bad Input")
            else:
                print "Valid Object: " + desc.Name

        # Set environment settings
        print "Running from: " + str(TestUtilities.currentPath)
        print "Geodatabase path: " + str(TestUtilities.geodatabasePath)

        arcpy.env.overwriteOutput = True
        arcpy.env.scratchWorkspace = TestUtilities.scratchGDB
        arcpy.ImportToolbox(toolbox, "VandR")

        inputFeatureCount = int(
            arcpy.GetCount_management(inputPolygonFC).getOutput(0))
        print "Input FeatureClass: " + str(inputPolygonFC)
        print "Input Feature Count: " + str(inputFeatureCount)

        if (inputFeatureCount < 1):
            print "Invalid Input Feature Count: " + str(inputFeatureCount)

        #########################################################
        # Execute the Model under test:
        arcpy.HighestPoint_VandR(inputPolygonFC, inputSurface, outputPointsFC)
        #########################################################

        # Verify the results
        outputFeatureCount = int(
            arcpy.GetCount_management(outputPointsFC).getOutput(0))
        print "Output FeatureClass: " + str(outputPointsFC)
        print "Output Feature Count: " + str(outputFeatureCount)

        if (outputFeatureCount < 1):
            print "Invalid Output Feature Count: " + str(outputFeatureCount)
            raise Exception("Test Failed")

        # WORKAROUND: delete scratch db
        print "Deleting Scratch Workspace (Workaround)"
        TestUtilities.deleteScratch()

        print "Test Successful"

    except arcpy.ExecuteError:
        # Get the tool error messages
        msgs = arcpy.GetMessages()
        arcpy.AddError(msgs)

        # return a system error code
        sys.exit(-1)

    except Exception as e:
        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n" + str(
            sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages() + "\n"

        # Return python error messages for use in script tool or Python Window
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        # return a system error code
        sys.exit(-1)

    finally:
        # Check in the Spatial Analyst extension
        arcpy.CheckInExtension("Spatial")
Example #49
0
def format_traceback(e: Exception) -> str:
    return json.dumps(traceback.format_tb(e.__traceback__), indent=3)
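On Python 3, where exceptions carry __traceback__, usage is a one-liner:

try:
    1 / 0
except Exception as e:
    print(format_traceback(e))  # JSON array, one string per stack frame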
Example #50
0
def main():
    output_root = None

    # Get script parameters
    results = check_args()
    if not results:
        sys.exit(0)
    portal_address, adminuser, password, src_ids = results

    try:

        # Create portal connection object
        portal = Portal(portal_address, adminuser, password)

        # Check if any specified GUIDs do not exist
        invalid_guids = validate_guids(portal, src_ids)
        if len(invalid_guids) > 0:
            raise Exception(
                'ERROR: The following portal items do not exist: {}'.format(
                    invalid_guids))

        # Create list of users
        users = [org_user['username'] for org_user in portal.org_users()]
        target_users = [user for user in users if user not in exclude_users]

        # -----------------------------------------------------------------
        # Extract portal items
        # -----------------------------------------------------------------
        print '\n\n{}\nExtracting select portal items...\n{}\n'.format(
            sec_char * sec_len, sec_char * sec_len)

        # Create temporary extract folder in OS users' temp directory
        output_root = os.path.join(
            tempfile.gettempdir(),
            os.path.basename(sys.argv[0]).split('.')[0] + '_Extract')
        os.makedirs(output_root)

        print 'Extract folder: {}'.format(output_root)

        # Extract specified portal item(s)
        for src_id in src_ids:
            src_item = portal.item(src_id)
            os.chdir(output_root)
            print '- Extracting item {} "{}" ({}) user account {}...'.format(
                src_item['id'], src_item['title'], src_item['type'],
                src_item['owner'])
            PortalContentExtract.extract_item(portal, src_item['id'],
                                              src_item['owner'])

        # Create list of paths to individual extracted portal item folders
        src_item_paths = [
            os.path.join(output_root, src_id) for src_id in src_ids
        ]

        # -----------------------------------------------------------------
        # Publish extracted portal items for each user
        # -----------------------------------------------------------------
        print '\n\n{}\nPublish extracted items to each portal ' \
                    'user account...\n{}'.format(sec_char * sec_len,
                                                 sec_char * sec_len)
        print 'NOTE: not publishing to the following users:'
        print exclude_users

        for target_user in target_users:
            print '\n\nUser Account: {}'.format(target_user)

            # Get info about user folders
            target_user_folders = portal.folders(target_user)

            for src_item_path in src_item_paths:

                # Get info about the source item
                os.chdir(src_item_path)
                src_item_json = json.load(open('item.json'))
                item_title = src_item_json['title']
                item_type = src_item_json['type']
                item_id = src_item_json['id']
                item_owner = src_item_json['owner']
                item_folder_id = src_item_json['ownerFolder']

                # Create folder in user account for item
                item_folder_name = get_folder_name(portal, item_owner,
                                                   item_folder_id)
                if item_folder_name:
                    if not has_folder(portal, target_user, item_folder_name):
                        print 'Creating target folder "{}" in account ' \
                                '{}...'.format(item_folder_name, target_user)
                        portal.create_folder(target_user, item_folder_name)

                # Check if user already owns item
                user_items = portal.search(
                    q='owner:{} AND type:{} AND title:{}'.format(
                        target_user, item_type, item_title))

                # Add item if item does not exist in user account or
                # update item if it already exists
                if len(user_items) == 0:
                    print '\n- Add item "{}" ({}) to user account {}...'.format(
                        item_title, item_type,
                        portal.logged_in_user()['username'])
                    item, orig_id = provision.load_item(portal, src_item_path)

                    print '- Reassign item to user account {}, ' \
                                'folder "{}"...'.format(target_user,
                                                        item_folder_name)
                    portal.reassign_item(item.get('id'), target_user,
                                         item_folder_name)
                else:
                    for user_item in user_items:
                        if user_item['id'] != item_id:
                            print '\n- Update existing item {} ' \
                                    '"{}" ({}) user account {}...'.format(
                                            user_item['id'], user_item['title'],
                                            user_item['type'], user_item['owner'])
                            item, orig_id = provision.load_item(
                                portal, src_item_path, user_item['id'])

                            print '- Reassign item to user account {}, ' \
                                'folder "{}"...'.format(target_user,
                                                        item_folder_name)
                            portal.reassign_item(item.get('id'), target_user,
                                                 item_folder_name)

                        else:
                            print '*** No need to update item {}; ' \
                                    'user is owner of extracted item.'.format(
                                    user_item['id'])
        print '\n\nDone.'

    except:

        # Get the traceback object
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        # Concatenate information together concerning the error
        # into a message string
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + \
                "\nError Info:\n" + str(sys.exc_info()[1])

        # Print Python error messages for use in Python / Python Window
        print
        print "***** ERROR ENCOUNTERED *****"
        print pymsg + "\n"

    finally:

        # Change directory to starting directory, otherwise the
        # delete will fail.
        os.chdir(start_dir)

        # Delete temp extracted folder/files
        if output_root:
            if os.path.exists(output_root):
                shutil.rmtree(output_root)
示例#51
0
def get_traceback_string(exception):
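    # Python 3 exceptions carry their traceback on __traceback__; fall back
    # to sys.exc_info() when the attribute is missing (e.g. on Python 2).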
    if hasattr(exception, '__traceback__'):
        tb_strings = traceback.format_tb(exception.__traceback__)
    else:
        tb_strings = traceback.format_exception(*sys.exc_info())
    return ''.join(tb_strings)
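
# Illustrative usage sketch (not from the original example; assumes
# `import sys` and `import traceback` at module level):
try:
    1 / 0
except ZeroDivisionError as exc:
    print(get_traceback_string(exc))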
示例#52
0
def mail_log(parent=None,
             comment=None,
             helpdesk=None,
             sender=None,
             exception=None):

    if (comment is None) or (comment.strip() == ''):
        comment = wx.GetTextFromUser(message=_(
            'Please enter a short note on what you\n'
            'were about to do in GNUmed:'),
                                     caption=_('Sending bug report'),
                                     parent=parent)
        if comment.strip() == '':
            comment = '<user did not comment on bug report>'

    receivers = []
    if helpdesk is not None:
        receivers = regex.findall(r'[\S]+@[\S]+',
                                  helpdesk.strip(),
                                  flags=regex.UNICODE)
    if len(receivers) == 0:
        if _is_public_database:
            receivers = ['*****@*****.**']

    receiver_string = wx.GetTextFromUser(message=_(
        'Edit the list of email addresses to send the\n'
        'bug report to (separate addresses by spaces).\n'
        '\n'
        'Note that <*****@*****.**> refers to\n'
        'the public (!) GNUmed bugs mailing list.'),
                                         caption=_('Sending bug report'),
                                         default_value=','.join(receivers),
                                         parent=parent)
    if receiver_string.strip() == '':
        return

    receivers = regex.findall(r'[\S]+@[\S]+',
                              receiver_string,
                              flags=regex.UNICODE)

    dlg = gmGuiHelpers.c2ButtonQuestionDlg(
        parent,
        -1,
        caption=_('Sending bug report'),
        question=_('Your bug report will be sent to:\n'
                   '\n'
                   '%s\n'
                   '\n'
                   'Make sure you have reviewed the log file for potentially\n'
                   'sensitive information before sending out the bug report.\n'
                   '\n'
                   'Note that emailing the report may take a while depending\n'
                   'on the speed of your internet connection.\n') %
        '\n'.join(receivers),
        button_defs=[{
            'label': _('Send report'),
            'tooltip': _('Yes, send the bug report.')
        }, {
            'label': _('Cancel'),
            'tooltip': _('No, do not send the bug report.')
        }],
        show_checkbox=True,
        checkbox_msg=_('include log file in bug report'))
    dlg._CHBOX_dont_ask_again.SetValue(_is_public_database)
    go_ahead = dlg.ShowModal()
    if go_ahead == wx.ID_NO:
        dlg.DestroyLater()
        return

    include_log = dlg._CHBOX_dont_ask_again.GetValue()
    if not _is_public_database:
        if include_log:
            result = gmGuiHelpers.gm_show_question(
                _('The database you are connected to is marked as\n'
                  '"in-production with controlled access".\n'
                  '\n'
                  'You indicated that you want to include the log\n'
                  'file in your bug report. While this is often\n'
                  'useful for debugging the log file might contain\n'
                  'bits of patient data which must not be sent out\n'
                  'without de-identification.\n'
                  '\n'
                  'Please confirm that you want to include the log !'),
                _('Sending bug report'))
            include_log = (result is True)

    if sender is None:
        sender = _('<not supplied>')
    else:
        if sender.strip() == '':
            sender = _('<not supplied>')

    if exception is None:
        exc_info = ''
    else:
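        # 'exception' is a (type, value, traceback) tuple, as returned
        # by sys.exc_info()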
        t, v, tb = exception
        exc_info = 'Exception:\n\n type: %s\n value: %s\n\nTraceback:\n\n%s' % (
            t, v, ''.join(traceback.format_tb(tb)))

    msg = """\
Report sent via GNUmed's handler for unexpected exceptions.

user comment  : %s

client version: %s

system account: %s
staff member  : %s
sender email  : %s

 # enable Launchpad bug tracking
 affects gnumed
 tag automatic-report
 importance medium

%s
""" % (comment, _client_version, _local_account, _staff_name, sender, exc_info)
    if include_log:
        _log.error(comment)
        _log.warning('syncing log file for emailing')
        gmLog2.flush()
        attachments = [[_logfile_name, 'text/plain', 'quoted-printable']]
    else:
        attachments = None

    dlg.DestroyLater()

    wx.BeginBusyCursor()
    _cfg = gmCfgINI.gmCfgData()
    try:
        gmNetworkTools.compose_and_send_email(
            sender='%s <%s>' %
            (_staff_name, gmNetworkTools.default_mail_sender),
            receiver=receivers,
            subject='<bug>: %s' % comment,
            message=msg,
            server=gmNetworkTools.default_mail_server,
            auth={
                'user': gmNetworkTools.default_mail_sender,
                'password': '******'
            },
            debug=_cfg.get(option='debug'),
            attachments=attachments)
        gmDispatcher.send(signal='statustext',
                          msg=_('Bug report has been emailed.'))
    except Exception:
        _log.exception('cannot send bug report')
        gmDispatcher.send(signal='statustext',
                          msg=_('Bug report COULD NOT be emailed.'))
    finally:
        wx.EndBusyCursor()
示例#53
0
import sys
import traceback


def lumberjack():
    return tuple()[0]


try:
    lumberjack()
except IndexError:
    exc_type, exc_value, exc_traceback = sys.exc_info()
    print("*** print_tb:")
    traceback.print_tb(exc_traceback, limit=1, file=sys.stdout)
    print("*** print_exception:")
    # exc_type below is ignored on 3.5 and later
    traceback.print_exception(exc_type,
                              exc_value,
                              exc_traceback,
                              limit=2,
                              file=sys.stdout)
    print("*** print_exc:")
    traceback.print_exc(limit=2, file=sys.stdout)
    print("*** format_exc, first and last line:")
    formatted_lines = traceback.format_exc().splitlines()
    print(formatted_lines[0])
    print(formatted_lines[-1])
    print("*** format_exception:")
    # exc_type below is ignored on 3.5 and later
    print(repr(traceback.format_exception(exc_type, exc_value, exc_traceback)))
    print("*** extract_tb:")
    print(repr(traceback.extract_tb(exc_traceback)))
    print("*** format_tb:")
    print(repr(traceback.format_tb(exc_traceback)))
    print("*** tb_lineno:", exc_traceback.tb_lineno)
示例#54
0
def run_accuracy_test_for_netcdf(options, eps):
	def get_array_from_netcdf_variable(netcdf_variable):
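		# Slice the variable into a plain numpy array; handles 1 to 5
		# dimensions (implicitly returns None for anything higher).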
		num_of_dimensions = len(netcdf_variable.dimensions)
		if num_of_dimensions == 1:
			return netcdf_variable[:]
		if num_of_dimensions == 2:
			return netcdf_variable[:, :]
		if num_of_dimensions == 3:
			return netcdf_variable[:, :, :]
		if num_of_dimensions == 4:
			return netcdf_variable[:, :, :, :]
		if num_of_dimensions == 5:
			return netcdf_variable[:, :, :, :, :]

	from netCDF4 import Dataset
	import numpy
	inFile = None
	try:
		inFile = Dataset(options.inFile)
	except Exception as e:
		sys.stderr.write("Error: could not read %s. Error message: %s\n" %(options.inFile, str(e)))
		sys.exit(1)
	refFile = None
	try:
		refFile = Dataset(options.refFile)
	except Exception as e:
		sys.stderr.write("Error: could not read %s. Error message: %s\n" %(options.refFile, str(e)))
		sys.exit(1)
	error_found = False
	for key in inFile.variables.keys():
		try:
			in_array = None
			ref_array = None
			if key not in refFile.variables:
				sys.stderr.write("Error: variable %s not found in reference netcdf file %s\n" %(key, options.refFile))
				error_found = True
				continue
			in_variable = inFile.variables[key]
			if in_variable.dtype.kind == 'S':
				sys.stderr.write("Skipping variable %s with data type %s\n" %(key, in_variable.dtype))
				continue
			ref_variable = refFile.variables[key]
			if in_variable.dtype != ref_variable.dtype:
				sys.stderr.write("Error: variable %s has different datatypes - infile: %s, reference: %s\n" %(key, in_variable.dtype, ref_variable.dtype))
				error_found = True
				continue
			try:
				shape_comparison = numpy.equal(in_variable.shape, ref_variable.shape)
			except Exception:
				sys.stderr.write("Error: variable %s's shape (%s) could not be compared to the reference shape (%s)\n" %(key, str(in_variable.shape), ref_variable.shape))
				error_found = True
				continue
			if not numpy.all(shape_comparison):
				sys.stderr.write("Error: variable %s has different shapes - infile: %s, reference: %s\n" %(key, in_variable.shape, ref_variable.shape))
				error_found = True
				continue
			in_array = get_array_from_netcdf_variable(in_variable)
			ref_array = get_array_from_netcdf_variable(ref_variable)
			mean_or_one = numpy.mean(in_array)
			if abs(mean_or_one) < eps:
				mean_or_one = 1.0
			absolute_difference = numpy.abs(in_array - ref_array)
			greater_than_epsilon = (absolute_difference / mean_or_one) > eps
			passed_string = "pass"
			if numpy.count_nonzero(ref_array) == 0:
				passed_string += "(WARNING:Reference is Zero Matrix!)"
			root_mean_square_deviation = numpy.sqrt(numpy.mean((in_array - ref_array)**2))
			root_mean_square_deviation = root_mean_square_deviation / abs(mean_or_one)
			if math.isnan(root_mean_square_deviation):
				passed_string = "FAIL <-------"
				error_found = True
			elif numpy.any(greater_than_epsilon):
				error_found = True
				# first_error = numpy.unravel_index(numpy.argmax(greater_than_epsilon), greater_than_epsilon.shape)[0]
				number_of_elements = numpy.prod(in_array.shape)
				if number_of_elements <= 8:
					passed_string = "input: \n%s\nexpected:\n%s\nerrors found at:%s\nFAIL <-------" %(in_array, ref_array, greater_than_epsilon)
				else:
					first_occurrence = numpy.argmax(greater_than_epsilon)
					first_occurrence_index_tuple = numpy.unravel_index(first_occurrence, in_array.shape)
					first_err_val = in_array[first_occurrence_index_tuple]
					expected_val = ref_array[first_occurrence_index_tuple]
					passed_string = "first error at:%s; first error value: %s; expected: %s; array size: %s; FAIL <-------" %(first_occurrence_index_tuple, first_err_val, expected_val, in_array.shape)
					# numpy.set_printoptions(precision=3)
					# passed_string += "\n === curr === \n" + str(in_array)
					# passed_string += "\n === ref === \n" + str(ref_array)
			elif root_mean_square_deviation > eps:
				error_found = True
				passed_string = "FAIL <-------"
			sys.stderr.write("%s, variable %s: Mean square error: %e; %s\n" %(
				options.inFile,
				key,
				root_mean_square_deviation,
				passed_string
			))
		except Exception as e:
			message = "Variable %s\nError Message: %s\n%s\ninarray:%s\nrefarray:%s" %(key, str(e), traceback.format_tb(sys.exc_info()[2]), str(in_array), str(ref_array))
			e.args = (message,)+e.args[1:]
			raise e
	if error_found:
		sys.exit(1)
示例#55
0
    temperature_div = ngs.find("div", "today-panel__temperature")
    weather = {
        "temperature":
        temperature_div.find(
            "span", "value__main").get_text().encode("utf-8").strip().replace(
                " ", "") + "°C",
        "temperature_trend":
        "+" if temperature_div.find("i", "icon-temp_status-up") else "-",
        "description":
        temperature_div.find("span",
                             "value-description").get_text().strip().lower(),
        "romance":
        "Восход: %s, закат: %s" % tuple(
            ngs.find_all("div", "today-panel__info__main__item")[1].find_all(
                "dt")[0].get_text().encode("utf-8").strip().split(" − "))
    }

    open(os.path.join(os.path.dirname(__file__), "index.json"),
         "w").write(simplejson.dumps(weather))
except:
    msg = MIMEText(
        "Weather update error: %s\n%s" %
        (repr(sys.exc_info()[1]), traceback.format_tb(sys.exc_info()[2])[0]))
    msg["Subject"] = "Weather update error"
    msg["From"] = "*****@*****.**"
    msg["To"] = "*****@*****.**"

    s = smtplib.SMTP("localhost")
    s.sendmail(msg["From"], [msg["To"]], msg.as_string())
    s.quit()
示例#56
0
def MWCOG_BufferAndExport(point_shape, temp_prefix_short,
                          temp_prefix_long, startfresh, export):

    global my_out_fields
    global my_join_uniq_id

    #
    # Step 1: Message
    #

    mystarttime   = datetime.datetime.now()
    myendtime     = None
    myelapsedtime = None
    MWCOG_PrintWriter( "\n\tMWCOG_BufferAndExport (Started: " \
                      + str(mystarttime).split(".")[0] + ")" )
    MWCOG_PrintWriter( "\tShapefile  : " \
                      + str( ('..' + point_shape[(len(point_shape)\
                                                  -my_rightstringlength):])
                                               if len(point_shape) > my_rightstringlength
                                               else point_shape))
    MWCOG_PrintWriter( "\tShort Walk : " + str(dist_short) + ", Field = " \
                      + str(temp_prefix_short))
    MWCOG_PrintWriter( "\tLong Walk  : " + str(dist_long)  + ", Field = " \
                      + str(temp_prefix_long))

    #
    # Step 2: Create temporary file geodatabase
    #

    if (startfresh == 1) and arcpy.Exists(my_GDB):
        MWCOG_PrintWriter( "\tdeleting geo-database ..." )
        arcpy.Delete_management(my_tmp_folder + "\\" + my_GDB_name)
    if not arcpy.Exists(my_GDB):
        if (my_GDB_type == "PERSONAL"):
            MWCOG_PrintWriter( "\tcreating personal geo-database ...")
            arcpy.CreatePersonalGDB_management(my_tmp_folder,
                                               my_GDB_name, "10.0")
        else:
            MWCOG_PrintWriter( "\tcreating file geo-database ...")
            arcpy.CreateFileGDB_management(my_tmp_folder,
                                           my_GDB_name, "10.0")

    #
    # Step 3: Set Workspace(s)
    #

    arcpy.env.scratchWorkspace = my_GDB

    ## if (startfresh == 1):
    ##     MWCOG_PrintWriter( "\tlisting environment variables:" )
    ##     environments = arcpy.ListEnvironments()
    ##     for environment in environments:
    ##             envSetting = getattr(env, environment)
    ##             MWCOG_PrintWriter( "\t\t%-28s: %s" % (environment, envSetting) )

    #
    # Step 4: Work
    #

    try:
        #
        # Construct paths
        #
        temp_TAZ      = my_GDB        + "\\" + "TAZ_with_PercentWalkSheds"

        short_Temp1   = my_GDB        + "\\" + str(temp_prefix_short) + "Temp1"
        short_Temp2   = my_GDB        + "\\" + str(temp_prefix_short) + "Temp2"
        short_Temp3   = my_GDB        + "\\" + str(temp_prefix_short) + "Temp3"
        short_Temp4   = my_GDB        + "\\" + str(temp_prefix_short)
        short_OutFile = my_out_folder + "\\" + str(temp_prefix_short) + ".csv"

        long_Temp1    = my_GDB        + "\\" + str(temp_prefix_long)  + "Temp1"
        long_Temp2    = my_GDB        + "\\" + str(temp_prefix_long)  + "Temp2"
        long_Temp3    = my_GDB        + "\\" + str(temp_prefix_long)  + "Temp3"
        long_Temp4    = my_GDB        + "\\" + str(temp_prefix_long)
        long_OutFile  = my_out_folder + "\\" + str(temp_prefix_long)  + ".csv"

        #
        # Delete Existing Outputs
        #
        MWCOG_PrintWriter( "\tinitializing outputs ..." )

        if (startfresh == 1):

            # if starting afresh, copy TAZ layer into geodatabase and compute area
            if arcpy.Exists(temp_TAZ)  : arcpy.Delete_management(temp_TAZ)
            arcpy.CopyFeatures_management(User_Input_TAZLandArea_Shapefile, temp_TAZ)

            # check if area field exists else compute
            my_fieldList = arcpy.ListFields(temp_TAZ)

            my_fieldexists = False
            for my_field in my_fieldList:
                if my_field.name == my_TAZ_area_name:
                    my_fieldexists = True
                    MWCOG_PrintWriter( "\t\tfound/using existing TAZ area field: " \
                                      + my_TAZ_area_name )
                    break
            if not my_fieldexists:
                MWCOG_PrintWriter( "\t\tcreating TAZ area field: " \
                                  + my_TAZ_area_name )
                arcpy.AddField_management(temp_TAZ, my_TAZ_area_name, "DOUBLE",
                                          14, "", "", my_TAZ_area_name)
                arcpy.CalculateField_management( temp_TAZ, my_TAZ_area_name,
                                                "!shape.area@SQUAREFEET!", "PYTHON")

        # delete old files
        if arcpy.Exists(short_Temp1)   : arcpy.Delete_management(short_Temp1)
        if arcpy.Exists(short_Temp2)   : arcpy.Delete_management(short_Temp2)
        if arcpy.Exists(short_Temp3)   : arcpy.Delete_management(short_Temp3)
        if arcpy.Exists(short_Temp4)   : arcpy.Delete_management(short_Temp4)
        if arcpy.Exists(short_OutFile) : arcpy.Delete_management(short_OutFile)

        if arcpy.Exists(long_Temp1)    : arcpy.Delete_management(long_Temp1)
        if arcpy.Exists(long_Temp2)    : arcpy.Delete_management(long_Temp2)
        if arcpy.Exists(long_Temp3)    : arcpy.Delete_management(long_Temp3)
        if arcpy.Exists(long_Temp4)    : arcpy.Delete_management(long_Temp4)
        if arcpy.Exists(long_OutFile)  : arcpy.Delete_management(long_OutFile)

        #
        # Process: Buffer & Compact database
        #
        MWCOG_PrintWriter( "\tbuffering ..." )

        arcpy.Buffer_analysis(point_shape, short_Temp1, dist_short,
                              my_buffer_sideType, my_buffer_endType,
                              my_buffer_dissolveType, my_buffer_dissolveField)
        arcpy.Compact_management( my_GDB )

        arcpy.Buffer_analysis(point_shape,  long_Temp1,  dist_long,
                              my_buffer_sideType, my_buffer_endType,
                              my_buffer_dissolveType, my_buffer_dissolveField)
        arcpy.Compact_management( my_GDB )

        #
        # Process: Add a field to dissolve on
        #
        MWCOG_PrintWriter( "\tadding a field for dissolving split buffers ..." )

        arcpy.AddField_management(short_Temp1, my_temp_buffer_dissolve_field_name,
                                  "SHORT", 1, "", "",
                                  my_temp_buffer_dissolve_field_name)
        arcpy.CalculateField_management(short_Temp1,
                                        my_temp_buffer_dissolve_field_name,
                                        "0", "PYTHON", "")

        arcpy.AddField_management( long_Temp1, my_temp_buffer_dissolve_field_name,
                                  "SHORT", 1, "", "",
                                  my_temp_buffer_dissolve_field_name)
        arcpy.CalculateField_management( long_Temp1,
                                        my_temp_buffer_dissolve_field_name,
                                        "0", "PYTHON", "")

        #
        # Process: Dissolve 1
        #
        MWCOG_PrintWriter( "\tdissolving any split buffers ..." )

        arcpy.Dissolve_management(short_Temp1, short_Temp2, "Buf_Dis", "",
                                  "MULTI_PART", "DISSOLVE_LINES")
        arcpy.Compact_management( my_GDB )

        arcpy.Dissolve_management( long_Temp1,  long_Temp2, "Buf_Dis", "",
                                  "MULTI_PART", "DISSOLVE_LINES")
        arcpy.Compact_management( my_GDB )

        #
        # Process: Intersect
        #
        MWCOG_PrintWriter( "\tintersecting ..." )

        arcpy.Intersect_analysis("'" + short_Temp2 + "'" + " #;" + \
                                 "'" + temp_TAZ + "'" + " #",
                                 short_Temp3, "NO_FID", "", "INPUT")
        arcpy.Compact_management( my_GDB )

        arcpy.Intersect_analysis("'" + long_Temp2 + "'" + " #;" + "'" \
                                 + temp_TAZ + "'" + " #",
                                 long_Temp3, "NO_FID", "", "INPUT")
        arcpy.Compact_management( my_GDB )

        #
        # Process: Dissolve 2
        #
        MWCOG_PrintWriter( "\tdissolving ..." )

        arcpy.Dissolve_management(short_Temp3, short_Temp4, my_dissolve_Fields,
                                  "", "MULTI_PART", "DISSOLVE_LINES")
        arcpy.Compact_management( my_GDB )

        arcpy.Dissolve_management( long_Temp3,  long_Temp4, my_dissolve_Fields,
                                  "", "MULTI_PART", "DISSOLVE_LINES")
        arcpy.Compact_management( my_GDB )

        #
        # Process: Join Short-Walk to Zone Layer
        #
        MWCOG_PrintWriter( "\tcomputing short-walk (" + calc_type + ") ..." )

        # join
        # Help: http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#//001700000065000000
        arcpy.JoinField_management(temp_TAZ, my_TAZ_name, short_Temp4,
                                   my_TAZ_name, my_join_Fieldlist)



        # construct shape_area name
        # Below method is quirky, but this is how ArcGIS appends fields
        my_shape_area_field = {
            0: "Shape_Area",
            1: "Shape_Area_1",
            2: "Shape_Area_12",
            3: "Shape_Area_12_13",
            4: "Shape_Area_12_13_14",
            5: "Shape_Area_12_13_14_15",
            6: "Shape_Area_12_13_14_15_16",
        }.get(my_join_uniq_id, "UNDEFINED")

        my_join_uniq_id += 1

        # calculate percent walk
        my_calcfield_expression = "CalcAttribute( !" + my_TAZ_area_name \
        + "!, !" + my_shape_area_field + "! )"
        arcpy.AddField_management(temp_TAZ, temp_prefix_short, "DOUBLE", "",
                                  "", "", "", "NULLABLE", "REQUIRED", "")

        # select based on calculation type
        if   (calc_type == "PercentWalk") :
                arcpy.CalculateField_management(temp_TAZ, temp_prefix_short,
                                                my_calcfield_expression, "PYTHON",
                                                my_pctwlk_calcfield_codeblock)
        elif (calc_type == "AreaWalk") :
                arcpy.CalculateField_management(temp_TAZ, temp_prefix_short,
                                                my_calcfield_expression, "PYTHON",
                                                my_arawlk_calcfield_codeblock)
        else:
                arcpy.AddError("ERROR: Un-recognized calc_type specified!")

        # make a note of this field
        my_out_fields.append(temp_prefix_short)

        #
        # Process: Join Long-Walk to Zone Layer
        #
        MWCOG_PrintWriter( "\tcomputing long-walk (" + calc_type + ") ..." )

        # join
        # Help: http://help.arcgis.com/en/arcgisdesktop/10.0/help/index.html#//001700000065000000
        arcpy.JoinField_management(temp_TAZ, my_TAZ_name, long_Temp4,
                                   my_TAZ_name, my_join_Fieldlist)

        # construct shape_area name
        # Below method is quirky, but this is how ArcGIS appends fields
        my_shape_area_field = {
            0: "Shape_Area",
            1: "Shape_Area_1",
            2: "Shape_Area_12",
            3: "Shape_Area_12_13",
            4: "Shape_Area_12_13_14",
            5: "Shape_Area_12_13_14_15",
            6: "Shape_Area_12_13_14_15_16",
        }.get(my_join_uniq_id, "UNDEFINED")

        my_join_uniq_id += 1

        # calculate percent walk
        my_calcfield_expression = "CalcAttribute( !" + my_TAZ_area_name + "!, !" \
        + my_shape_area_field + "! )"
        arcpy.AddField_management(temp_TAZ, temp_prefix_long, "DOUBLE", "", "",
                                  "", "", "NULLABLE", "REQUIRED", "")

        # select based on calculation type
        if   (calc_type == "PercentWalk") :
                arcpy.CalculateField_management(temp_TAZ, temp_prefix_long,
                                                my_calcfield_expression, "PYTHON",
                                                my_pctwlk_calcfield_codeblock)
        elif (calc_type == "AreaWalk") :
                arcpy.CalculateField_management(temp_TAZ, temp_prefix_long,
                                                my_calcfield_expression, "PYTHON",
                                                my_arawlk_calcfield_codeblock)
        else:
                arcpy.AddError("ERROR: Un-recognized calc_type specified!")

        # make a note of this field
        my_out_fields.append(temp_prefix_long)

        #
        # Process: Export Feature Attribute to ASCII...
        #
        if (export==1):
            MWCOG_PrintWriter( "\twriting out " + calc_type + " Unsorted CSV file ..." )
            if arcpy.Exists(User_Output_Walkshed_CSV_File_UnSorted):
                arcpy.Delete_management(User_Output_Walkshed_CSV_File_UnSorted)
            arcpy.ExportXYv_stats(temp_TAZ, my_out_fields, "COMMA",
                                  User_Output_Walkshed_CSV_File_UnSorted,
                                  "ADD_FIELD_NAMES")

            MWCOG_PrintWriter( "\tsorting " + calc_type + " CSV file ..." )
            if arcpy.Exists(User_Output_Walkshed_CSV_File):
                arcpy.Delete_management(User_Output_Walkshed_CSV_File)

            with open(User_Output_Walkshed_CSV_File_UnSorted, "rb") as infile, \
                 open(User_Output_Walkshed_CSV_File, "wb") as outfile:
                reader = csv.reader(infile)
                writer = csv.writer(outfile)
                # returns the headers or `None` if the input is empty
                headers = next(reader, None)
                if headers:
                    writer.writerow(headers)
                # index 2 sorts according to the third column, "TAZID"
                sortedlist = sorted(reader, key=lambda x: int(x[2]))
                # now write the sorted result into the new CSV file
                for row in sortedlist:
                    writer.writerow(row)

            MWCOG_PrintWriter( "\twriting out " + calc_type + " TXT file ..." )
            if arcpy.Exists(User_Output_Walkshed_TXT_File)  :
                arcpy.Delete_management(User_Output_Walkshed_TXT_File)

            # list fields
            fieldList = arcpy.ListFields(User_Output_Walkshed_CSV_File)

            # skip the fields 1,2 & 5  'XCoord, YCoord, TAZ, TAZ_AREA, MTLRTSHR,
            #                  MTLRTLNG, ALLPKSHR, ALLPKLNG, ALLOPSHR, ALLOPLNG'

            FieldsToSkip = ['XCoord', 'YCoord']

            # open input and process

            i = 1
            f = open(User_Output_Walkshed_TXT_File,'w')

            # variables for statistics

            my_num_taz                      = 0
            my_total_taz_area               = 0
            my_total_taz_walk_area          = 0
            my_total_taz_walk_pk_area       = 0
            my_total_taz_walk_op_area       = 0

            my_total_mtlrt_shr_area         = 0
            my_total_mtlrt_lng_area         = 0
            my_total_allpk_shr_area         = 0
            my_total_allpk_lng_area         = 0
            my_total_allop_shr_area         = 0
            my_total_allop_lng_area         = 0

            my_taz_list_zero_land_area      = []
            my_taz_list_zero_walk_area      = []
            my_taz_shr_list_pk_less_than_op = []
            my_taz_lng_list_pk_less_than_op = []

            # write header
            for field in fieldList:
                if field.name in FieldsToSkip:
                    i += 1
                else:
                    if field.name == my_TAZ_name.upper():
                        f.write('%6s' % field.name)
                    elif i < len(fieldList):
                        f.write('%10s' % field.name)
                    else:
                        f.write('%10s\n' % field.name)
                    i += 1

            # write (copy) data
            rows = arcpy.SearchCursor(User_Output_Walkshed_CSV_File)
            for row in rows:
                i = 1
                my_mtlrt_shr = 0
                my_mtlrt_lng = 0
                my_allpk_shr = 0
                my_allpk_lng = 0
                my_allop_shr = 0
                my_allop_lng = 0

                for field in fieldList:
                    if field.name in FieldsToSkip:
                        i += 1
                    else:
                        # write fields
                        if field.name == my_TAZ_name.upper():
                            f.write('%6d' % row.getValue(field.name))
                        elif field.name == my_TAZ_area_name.upper():
                            f.write('%10.4f' % round(
                                (row.getValue(field.name) / (5280 * 5280)), 4))
                        elif i < len(fieldList):
                            f.write('%10.4f' % round(row.getValue(field.name), 4))
                        else:
                            f.write('%10.4f\n' % round(row.getValue(field.name), 4))
                        i += 1

                        # save field values for the summary checks below
                        if field.name == "MTLRTSHR":
                            my_mtlrt_shr = round(row.getValue(field.name), 4)
                        if field.name == "MTLRTLNG":
                            my_mtlrt_lng = round(row.getValue(field.name), 4)
                        if field.name == "ALLPKSHR":
                            my_allpk_shr = round(row.getValue(field.name), 4)
                        if field.name == "ALLPKLNG":
                            my_allpk_lng = round(row.getValue(field.name), 4)
                        if field.name == "ALLOPSHR":
                            my_allop_shr = round(row.getValue(field.name), 4)
                        if field.name == "ALLOPLNG":
                            my_allop_lng = round(row.getValue(field.name), 4)

                # update stats on fields

                my_num_taz += 1
                my_total_taz_area += round(
                    (row.getValue(my_TAZ_area_name.upper()) / (5280 * 5280)), 4)

                if row.getValue(my_TAZ_area_name.upper()) == 0:
                    my_taz_list_zero_land_area.append(
                        str(row.getValue(my_TAZ_name.upper())))

                if (my_mtlrt_shr + my_mtlrt_lng + my_allpk_shr +
                        my_allpk_lng + my_allop_shr + my_allop_lng) == 0:
                    my_taz_list_zero_walk_area.append(
                        str(row.getValue(my_TAZ_name.upper())))

                my_total_mtlrt_shr_area +=  my_mtlrt_shr
                my_total_mtlrt_lng_area +=  my_mtlrt_lng
                my_total_allpk_shr_area +=  my_allpk_shr
                my_total_allpk_lng_area +=  my_allpk_lng
                my_total_allop_shr_area +=  my_allop_shr
                my_total_allop_lng_area +=  my_allop_lng

                if( my_allpk_shr < my_allop_shr ):
                   my_taz_shr_list_pk_less_than_op.append( str( row.getValue(my_TAZ_name.upper())))
                if( my_allpk_lng < my_allop_lng ):
                   my_taz_lng_list_pk_less_than_op.append( str( row.getValue(my_TAZ_name.upper())))

                my_total_taz_walk_area    += ( max( my_mtlrt_lng, my_allpk_lng, my_allop_lng ))
                my_total_taz_walk_pk_area += ( max( my_mtlrt_lng, my_allpk_lng               ))
                my_total_taz_walk_op_area += ( max( my_mtlrt_lng,               my_allop_lng ))

            del rows
            f.close()

            # report stats on fields
            if( calc_type == "AreaWalk" ):
                MWCOG_PrintWriter( "\nSUMMARY REPORT:"                                                                                                                                                                                                )
                MWCOG_PrintWriter( "\n\tNumber of TAZ Records in Output                                  : " \
                                  + str('{:,}'.format(my_num_taz))                                                                                                         )
                MWCOG_PrintWriter( "\tTotal TAZ LAND Area                                              : " \
                                  + str('{:9.4f} sq. mi.'.format(my_total_taz_area))                                                                                         )
                if( len(my_taz_list_zero_land_area) == 0 ) :
                        MWCOG_PrintWriter( "\tTAZs with Zero LAND Area                                         : NONE"                                                                                                                                )
                else:
                        MWCOG_PrintWriter( "\tTAZs with Zero LAND Area                                         : " \
                                          + "(Count=" + str(len(my_taz_list_zero_land_area)) + ") " \
                                          + ','.join(sorted(my_taz_list_zero_land_area, key=int)))
                MWCOG_PrintWriter( "\n\tTotal TAZ Long-Walk Area                                         : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                      .format(my_total_taz_walk_area,
                                              my_total_taz_walk_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal TAZ Long-Walk (Peak Period) Area                           : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                      .format(my_total_taz_walk_pk_area,
                                              my_total_taz_walk_pk_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal TAZ Long-Walk (Off-Peak Period) Area                       : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                      .format(my_total_taz_walk_op_area,
                                              my_total_taz_walk_op_area/my_total_taz_area)))
                if( len(my_taz_list_zero_walk_area) == 0 ) :
                        MWCOG_PrintWriter( "\tTAZs with Zero WALK Area                                         : NONE"                                                                                                                                )
                else:
                        MWCOG_PrintWriter( "\tTAZs with Zero WALK Area                                         : " \
                                          + "(Count=" + str(len(my_taz_list_zero_walk_area)) \
                                          + ") " + ','.join(sorted(my_taz_list_zero_walk_area,
                                                                   key=int)))
                MWCOG_PrintWriter( "\n\tTotal MTLRTSHR Area                                              : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                        .format(my_total_mtlrt_shr_area,
                                                my_total_mtlrt_shr_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal MTLRTLNG Area                                              : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                        .format(my_total_mtlrt_lng_area,
                                                my_total_mtlrt_lng_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal ALLPKSHR Area                                              : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                        .format(my_total_allpk_shr_area,
                                                my_total_allpk_shr_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal ALLPKLNG Area                                              : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                        .format(my_total_allpk_lng_area,
                                                my_total_allpk_lng_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal ALLOPSHR Area                                              : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                        .format(my_total_allop_shr_area,
                                                my_total_allop_shr_area/my_total_taz_area)))
                MWCOG_PrintWriter( "\tTotal ALLOPLNG Area                                              : " \
                                  + str('{:9.4f} sq. mi. ({:6.2%} of TAZ Land Area)'
                                        .format(my_total_allop_shr_area,
                                                my_total_allop_shr_area/my_total_taz_area)))
                if( len(my_taz_shr_list_pk_less_than_op) == 0 ) :
                        MWCOG_PrintWriter( "\n\tTAZs with Short-Walk Less in Peak Period than in Off-Peak Period : NONE" )
                else:
                        MWCOG_PrintWriter( "\n\tTAZs with Short-Walk Less in Peak Period than in Off-Peak Period : " \
                                          + "(Count=" + str(len(my_taz_shr_list_pk_less_than_op)) + ") " \
                                          + ','.join(sorted(my_taz_shr_list_pk_less_than_op, key=int))      )
                if( len(my_taz_lng_list_pk_less_than_op) == 0 ) :
                        MWCOG_PrintWriter( "\tTAZs with  Long-Walk Less in Peak Period than in Off-Peak Period : NONE\n" )
                else:
                        MWCOG_PrintWriter( "\tTAZs with  Long-Walk  Less in Peak Period than in Off-Peak Period : " \
                                          + "(Count=" + str(len(my_taz_lng_list_pk_less_than_op)) \
                                          + ") " + ','.join(sorted(my_taz_lng_list_pk_less_than_op, key=int)) + "\n" )
    except:
        #
        # Get the traceback object
        #
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        #
        # Concatenate information together concerning the error into a message string
        #
        pymsg = "PYTHON ERRORS:\nTraceback info:\n" + tbinfo + "\nError Info:\n"\
         + str(sys.exc_info()[1])
        msgs = "ArcPy ERRORS:\n" + arcpy.GetMessages(2) + "\n\n\n"

        #
        # Return python error messages for use in script tool or Python Window
        #
        arcpy.AddError(pymsg)
        arcpy.AddError(msgs)

        return 1

    myendtime     = datetime.datetime.now()
    myelapsedtime = myendtime - mystarttime
    MWCOG_PrintWriter( "\tMWCOG_BufferAndExport (Finished: "\
                      + str(myendtime).split(".")[0] + ", Elapsed: " \
                      + str(myelapsedtime).split(".")[0] + ")" )
    return 0
示例#57
0
        def onReadable(self, fileno):
            self.logMessage('Reading...')
            requestHeader = b""
            requestBody = b""
            requestComplete = False
            requestPart = ""
            try:
                requestPart = self.connectionSocket.recv(self.bufferSize)
                self.logMessage('Just received... %d bytes in this part' %
                                len(requestPart))
                self.requestSoFar += requestPart
                endOfHeader = self.requestSoFar.find(b'\r\n\r\n')
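                # a blank line (CRLF CRLF) separates the HTTP header from the body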
                if self.expectedRequestSize > 0:
                    self.logMessage(
                        'received... %d of %d expected' %
                        (len(self.requestSoFar), self.expectedRequestSize))
                    if len(self.requestSoFar) >= self.expectedRequestSize:
                        requestHeader = self.requestSoFar[:endOfHeader + 2]
                        requestBody = self.requestSoFar[4 + endOfHeader:]
                        requestComplete = True
                else:
                    if endOfHeader != -1:
                        self.logMessage('Looking for content in header...')
                        contentLengthTag = self.requestSoFar.find(
                            b'Content-Length:')
                        if contentLengthTag != -1:
                            tag = self.requestSoFar[contentLengthTag:]
                            numberStartIndex = tag.find(b' ')
                            numberEndIndex = tag.find(b'\r\n')
                            contentLength = int(
                                tag[numberStartIndex:numberEndIndex])
                            self.expectedRequestSize = 4 + endOfHeader + contentLength
                            self.logMessage(
                                'Expecting a body of %d, total size %d' %
                                (contentLength, self.expectedRequestSize))
                            if len(requestPart) == self.expectedRequestSize:
                                requestHeader = requestPart[:endOfHeader + 2]
                                requestBody = requestPart[4 + endOfHeader:]
                                requestComplete = True
                        else:
                            self.logMessage(
                                'Found end of header with no content, so body is empty'
                            )
                            requestHeader = self.requestSoFar[:-2]
                            requestComplete = True
            except socket.error as e:
                print('Socket error: ', e)
                print('So far:\n', self.requestSoFar)
                requestComplete = True

            if len(requestPart) == 0 or requestComplete:
                self.logMessage(
                    'Got complete message of header size %d, body size %d' %
                    (len(requestHeader), len(requestBody)))
                self.readNotifier.disconnect('activated(int)', self.onReadable)
                self.readNotifier.setEnabled(False)
                qt.QTimer.singleShot(0, self.onReadableComplete)

                if len(self.requestSoFar) == 0:
                    self.logMessage("Ignoring empty request")
                    return

                method, uri, version = [b'GET', b'/', b'HTTP/1.1']  # defaults
                requestLines = requestHeader.split(b'\r\n')
                self.logMessage(requestLines[0])
                try:
                    method, uri, version = requestLines[0].split(b' ')
                except ValueError as e:
                    self.logMessage(
                        "Could not interpret first request lines: ",
                        requestLines)

                if requestLines == "":
                    self.logMessage(
                        "Assuming empty string is HTTP/1.1 GET of /.")

                if version != b"HTTP/1.1":
                    self.logMessage("Warning, we don't speak %s", version)
                    return

                # TODO: methods = ["GET", "POST", "PUT", "DELETE"]
                methods = [b"GET", b"POST", b"PUT"]
                if method not in methods:
                    self.logMessage("Warning, we only handle %s" % methods)
                    return

                parsedURL = urllib.parse.urlparse(uri)
                request = parsedURL.path
                if parsedURL.query != b"":
                    request += b'?' + parsedURL.query
                self.logMessage('Parsing url request: ', parsedURL)
                self.logMessage(' request is: %s' % request)

                highestConfidenceHandler = None
                highestConfidence = 0.0
                for handler in self.requestHandlers:
                    confidence = handler.canHandleRequest(uri, requestBody)
                    if confidence > highestConfidence:
                        highestConfidenceHandler = handler
                        highestConfidence = confidence

                if highestConfidenceHandler is not None and highestConfidence > 0.0:
                    try:
                        contentType, responseBody = highestConfidenceHandler.handleRequest(
                            uri, requestBody)
                    except:
                        etype, value, tb = sys.exc_info()
                        import traceback
                        for frame in traceback.format_tb(tb):
                            self.logMessage(frame)
                        self.logMessage(etype, value)
                        contentType = b'text/plain'
                        responseBody = b'Server error'  # TODO: send correct error code in response
                else:
                    contentType = b'text/plain'
                    responseBody = b''

                if responseBody:
                    self.response = b"HTTP/1.1 200 OK\r\n"
                    self.response += b"Access-Control-Allow-Origin: *\r\n"
                    self.response += b"Content-Type: %s\r\n" % contentType
                    self.response += b"Content-Length: %d\r\n" % len(
                        responseBody)
                    self.response += b"Cache-Control: no-cache\r\n"
                    self.response += b"\r\n"
                    self.response += responseBody
                else:
                    self.response = b"HTTP/1.1 404 Not Found\r\n"
                    self.response += b"\r\n"

                self.toSend = len(self.response)
                self.sentSoFar = 0
                fileno = self.connectionSocket.fileno()
                self.writeNotifier = qt.QSocketNotifier(
                    fileno, qt.QSocketNotifier.Write)
                self.writeNotifier.connect('activated(int)', self.onWritable)
示例#58
0
def computeReflectance():
    try:
        global _path, _band1Filename, _band2Filename, _band3Filename, _band4Filename
        global _d, _ESUN_B1, _ESUN_B2, _ESUN_B3, _ESUN_B4, _theta
        global _radiancefolder, _reflectancefolder

        # Process each band the same way: read the radiance image, convert
        # it to reflectance, and write the result to the reflectance folder.
        bands = [(_band1Filename, _ESUN_B1), (_band2Filename, _ESUN_B2),
                 (_band3Filename, _ESUN_B3), (_band4Filename, _ESUN_B4)]
        for band_filename, esun in bands:
            # Open the radiance image for this band
            ds = gdal.Open(
                os.path.join(_path, _radiancefolder,
                             _radiancefolder + "_" + band_filename))
            band = ds.GetRasterBand(1)
            radiance = np.array(band.ReadAsArray())

            # Compute the reflectance image
            reflectance = radiance * (math.pi * math.pow(float(_d), 2)) / float(
                esun) * math.sin(float(_theta))

            rows = radiance.shape[0]  # Original rows
            cols = radiance.shape[1]  # Original cols
            trans = ds.GetGeoTransform()  # Transformation from the original file
            proj = ds.GetProjection()  # Projection information
            nodatav = band.GetNoDataValue()  # NoData value
            outputRasterFile = os.path.join(
                _path, _reflectancefolder,
                _reflectancefolder + "_" + band_filename)  # Output file

            # Check if the output directory exists, then write the file to disk
            Utilities.checkIfDirectoryExists(
                os.path.join(_path, _reflectancefolder))
            WriteRaster.writeTIFF(rows, cols, trans, proj, nodatav, reflectance,
                                  outputRasterFile)
            ds = band = radiance = reflectance = None  # Free memory

    except:
        ## Return any Python specific errors and any error returned
        tb = sys.exc_info()[2]
        tbinfo = traceback.format_tb(tb)[0]

        pymsg = "PYTHON ERRORS:\n  Function def computeCloud() \n" + tbinfo + "\nError Info:\n    " + \
                str(sys.exc_type)+ ": " + str(sys.exc_value) + "\n"
        ##Write to the error log file
        logger_error.info(pymsg)
    return ""
示例#59
0
 def __repr__(self):
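     # When has_exception is True, self.value holds a (type, value, traceback)
     # tuple, so self.value[2] is the traceback object being formatted.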
     if self.has_exception:
         return "Attempts: {0}, Error:\n{1}".format(self.attempt_number, "".join(traceback.format_tb(self.value[2])))
     else:
         return "Attempts: {0}, Value: {1}".format(self.attempt_number, self.value)
示例#60
0
def get_traceback():
    exc, msg, tb = sys.exc_info()
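    # sys.exc_info() returns (None, None, None) when no exception is active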
    if exc:
        return '\n'.join(traceback.format_tb(tb))
    else:
        return None
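

# Illustrative usage sketch (not from the original example; assumes
# `import sys` and `import traceback` at module level): get_traceback()
# returns formatted frames only while an exception is being handled.
try:
    raise ValueError("boom")
except ValueError:
    print(get_traceback())  # the formatted traceback lines
print(get_traceback())      # None: no exception is active here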