Example #1
def unregister_ui(review_ui):
    """Unregister a Review UI class.

    This will unregister a previously registered Review UI.

    Only FileAttachmentReviewUI subclasses are supported. The class must
    have been registered beforehand or a ValueError will be thrown.

    Args:
        review_ui (type):
            The Review UI to unregister. This must be a subclass of
            :py:class:`FileAttachmentReviewUI`, and must have been registered
            before.

    Raises:
        TypeError:
            The provided Review UI class is not of a compatible type.

        ValueError:
            The provided Review UI was not previously registered.
    """
    if not issubclass(review_ui, FileAttachmentReviewUI):
        raise TypeError('Only FileAttachmentReviewUI subclasses can be '
                        'unregistered')

    try:
        _file_attachment_review_uis.remove(review_ui)
    except ValueError:
        logging.error('Failed to unregister missing review UI %r',
                      review_ui)
        raise ValueError('This review UI was not previously registered')
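A quick usage sketch of the register/unregister flow above. The stand-ins below are hypothetical so the sketch runs on its own; in the original module, FileAttachmentReviewUI, _file_attachment_review_uis, and a register_ui counterpart already exist:

import logging

class FileAttachmentReviewUI(object):  # hypothetical stand-in
    pass

_file_attachment_review_uis = []       # hypothetical stand-in

def register_ui(review_ui):            # hypothetical counterpart
    _file_attachment_review_uis.append(review_ui)

class ImageReviewUI(FileAttachmentReviewUI):
    pass

register_ui(ImageReviewUI)
unregister_ui(ImageReviewUI)      # succeeds: the class was registered

try:
    unregister_ui(ImageReviewUI)  # raises ValueError: already removed
except ValueError as e:
    logging.error('second unregister failed: %s', e)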
Example #2
    def massif_contour(self, data):
        """
        @param data:
        """

        if self.fig is None:
            logging.error("No diffraction image available => not showing the contour")
        else:
            tmp = 100 * (1 - data.astype("uint8"))
            mask = numpy.zeros((data.shape[0], data.shape[1], 4), dtype="uint8")

            mask[:, :, 0] = tmp
            mask[:, :, 1] = tmp
            mask[:, :, 2] = tmp
            mask[:, :, 3] = tmp
            while len(self.msp.images) > 1:
                self.msp.images.pop()
            try:
                xlim, ylim = self.ax.get_xlim(), self.ax.get_ylim()
                self.msp.imshow(mask, cmap="gray", origin="lower", interpolation="nearest")
                self.ax.set_xlim(xlim)
                self.ax.set_ylim(ylim)
            except MemoryError:
                logging.error("Sorry but your computer does NOT have enough memory to display the massif plot")
            # self.fig.show()
            self.fig.canvas.draw()
Example #3
    def Adjustment(self):
        """ adjustment & and blunder removing

            :returns: adjusted coordinates or None
        """
        # adjustment loop
        last_res = None
        while True:
            res, blunder = self.g.adjust()
            if res is None or 'east' not in res[0] or 'north' not in res[0] or \
                              'elev' not in res[0]:
                # adjustment failed or too many blunders
                if last_res is not None:
                    logging.warning("blunders are not fully removed")
                    res = last_res
                else:
                    logging.error("adjustment failed")
                break
            elif blunder['std-residual'] < 1.0:
                logging.info("blunders removed")
                break
            else:
                logging.info("%s - %s observation removed" % (blunder['from'], blunder['to']))
                self.g.remove_observation(blunder['from'], blunder['to'])
                last_res = res
        return res
Example #4
  def _DetectExecutedBinaries(file_name):
    """Detect executed binaries from an strace output file.

    Args:
      file_name: An strace output file.

    Returns:
      A set of the binaries executed during an strace run.
    """

    executed_binaries = set()
    try:
      with open(file_name, 'r') as strace_file:
        for line in strace_file:
          match = BinaryLauncher._EXEC_EXPRESSION.search(line)
          if match:
            binary = match.group(1)
            if binary[0] != '/':
              logging.error('Expecting an absolute path to a binary, found %s '
                            'instead.', binary)
            else:
              executed_binaries.add(binary)
        return executed_binaries
    except (IOError, OSError) as err:
      logging.error('Could not read strace file %s: %s', file_name, err)
      return executed_binaries
Example #5
    def contour(self, data):
        """
        Overlay a contour-plot

        @param data: 2darray with the 2theta values in radians...
        """
        if self.fig is None:
            logging.warning("No diffraction image available => not showing the contour")
        else:
            while len(self.msp.images) > 1:
                self.msp.images.pop()
            while len(self.ct.images) > 1:
                self.ct.images.pop()
            while len(self.ct.collections) > 0:
                self.ct.collections.pop()

            if self.points.dSpacing and self.points._wavelength:
                angles = list(2.0 * numpy.arcsin(5e9 * self.points._wavelength / numpy.array(self.points.dSpacing)))
            else:
                angles = None
            try:
                xlim, ylim = self.ax.get_xlim(), self.ax.get_ylim()
                self.ct.contour(data, levels=angles)
                self.ax.set_xlim(xlim)
                self.ax.set_ylim(ylim)
                print("Visually check that the curve overlays with the Debye-Scherrer rings of the image")
                print("Check also for correct indexing of rings")
            except MemoryError:
                logging.error("Sorry but your computer does NOT have enough memory to display the 2-theta contour plot")
            self.fig.show()
Example #6
def gql_json_parser(query_obj, form_id):
    all_components = []
    for e in query_obj:
        # Options are required for both radio buttons and checkboxes.
        if e.input_type in ('radiobuttons', 'checkbox') and e.options:
            logging.error(e.options)
            opts = json.loads(e.options)
            radio_values = []
            for elm in opts:
                if e.input_type == 'radiobuttons':
                    field = {"type": "radio"}
                elif e.input_type == 'checkbox':
                    field = {"type": "checkbox"}
                field["name"] = str(e.key().id())
                field["caption"] = elm.capitalize()
                field["value"] = elm
                field["id"] = elm
                radio_values.append(field)
            form_components = {"type": 'div', "data-role": 'fieldcontain',
                               "html": {"type": 'fieldset', "data-role": "controlgroup",
                                        "caption": e.caption, "data-type": "horizontal",
                                        "data-mini": "true", "html": radio_values}}
        elif e.input_type == "h2":
            form_components = {"type": e.input_type, "html": e.caption}
        else:
            form_components = {"name": str(e.key().id()), "id": str(e.key().id()),
                               "type": e.input_type, "caption": e.caption}
        if e.input_type == 'file':
            form_components['class'] = 'image_file'
        all_components.append(form_components)
    all_components.append({"type": "hidden", "name": "form_id", "value": str(form_id)})
    all_components.append({"type": "submit", "value": "Spara checklistan!"})
    return all_components
Example #7
def handle(data, con, apikey=None):
  d = json.loads(data)

  handlers = {'import': importit, 'ping': ping,
      'listimported': listimported, 'slice': sliceit,
      'listdone': listdone, 'getdone': getdone,
      'importconfig': importconfig, 'listconfig': listconfigs,
      'listprogress': listprogress, 'getstats': getstats,
      'journal': getjournal, 'del': wipefile, 'wait': waitfor}

  hndlr = noop
  cmd = 'noop'
  if 'cmd' in d:
    if d['cmd'] in handlers:
      cmd = d['cmd']
      hndlr = handlers[cmd]

  logging.info('cmd: ' + cmd)

  if apikey is not None:
    if not ('key' in d and d['key'] == apikey):
      logging.info('authentication failed for "{}" key!'.format(
        d.get('key', '')))
      return json.dumps({'r': 'fail',
        'm': 'authentication failed. incorrect apikey'})

  try:
    r = hndlr(d, con)
    result = json.dumps(r)
  except Exception as e:
    logging.error(str(e))
    result = json.dumps({u'm':unicode(e), u'r':u'fail'})
  logaccess(len(data), len(result), unicode(cmd), con)

  return result
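For contrast, a hedged Python 3 sketch of the same dispatch-table pattern (the original is Python 2). The noop fallback here is hypothetical, mirroring the one the original assumes:

import json
import logging

def noop(d, con):  # hypothetical fallback handler
    return {'r': 'ok', 'm': 'noop'}

def handle3(data, con, handlers, apikey=None):
    d = json.loads(data)
    cmd = d.get('cmd') if d.get('cmd') in handlers else 'noop'
    hndlr = handlers.get(cmd, noop)
    logging.info('cmd: %s', cmd)

    if apikey is not None and d.get('key') != apikey:
        logging.info('authentication failed for "%s" key!', d.get('key', ''))
        return json.dumps({'r': 'fail',
                           'm': 'authentication failed. incorrect apikey'})

    try:
        result = json.dumps(hndlr(d, con))
    except Exception as e:
        logging.error(str(e))
        result = json.dumps({'m': str(e), 'r': 'fail'})
    return result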
Example #8
def compile(input, output=None, flags=None):
    """Prepares command-line call to Closure Compiler.

    Args:
      source_paths: Source paths to build, in order.

    Returns:
      The compiled source, as a string, or None if compilation failed.
    """

    # User friendly version check.
    if not (distutils.version.LooseVersion(_GetJavaVersion()) >=
                distutils.version.LooseVersion('1.6')):
        logging.error('Requires Java 1.6 or higher. '
                      'Please visit http://www.java.com/getjava')
        return

    svn.try_lock(output)

    args = ['java', '-jar', os.path.dirname(__file__) + '/lib/yuicompressor-2.4.7.jar', input, '--line-break', '1000',
            '--charset', 'gb2312']

    if output:
        args += ['-o', output]

    if flags:
        args += flags

    command.run(' '.join(args), show_log=True)

    return output
Example #9
 def execute(self, email_models):
     logging.debug("In Destiny::execute()")
     if not email_models:
         return
     emails_id = []
     destinations = {}
     for destiny in self._plugins.keys():
         destinations.setdefault(destiny, email_models[-1].get(destiny))
         emails_id.append(email_models[-1].email_id())
     for email_model in email_models[:-1]:
         for destiny in self._plugins.keys():
             d_tables = destinations.get(destiny).get("tables")
             for d_table in d_tables:
                 for k, v in d_table.iteritems():
                     m_tables = email_model.get(destiny).get("tables")
                     for m_table in m_tables:
                         if k in m_table:
                             d_table.setdefault(k, []).extend(m_table[k])
         emails_id.append(email_model.email_id())
     for destiny, models in destinations.iteritems():
         for forward in self._plugins.get(destiny):
             try:
                 forward.execute(models)
             except Exception, e:
                 logging.error("!! Error-execute: %s" % (str(e),))
                 logging.info("Add emails in queure error: %s" % str(emails_id))
                 for email_id in emails_id:
                     self.add_email_error(email_id)
                 continue
Example #10
    def post_config_change(self, method):
        route = CsRoute()
        if method == "add":
            route.add_table(self.dev)
            route.add_route(self.dev, str(self.address["network"]))
        elif method == "delete":
            logging.warn("delete route not implemented")

        self.fw_router()
        self.fw_vpcrouter()

        # On deletion nw_type will no longer be known
        if self.get_type() in ["guest"] and self.config.is_vpc():

            CsDevice(self.dev, self.config).configure_rp()

            logging.error(
                "Not able to setup source-nat for a regular router yet")
            dns = CsDnsmasq(self)
            dns.add_firewall_rules()
            app = CsApache(self)
            app.setup()

        cmdline = self.config.cmdline()
        # If redundant then this is dealt with by the master backup functions
        if self.get_type() in ["guest"] and not cmdline.is_redundant():
            pwdsvc = CsPasswdSvc(self.address['public_ip']).start()

        if self.get_type() == "public" and self.config.is_vpc():
            if self.address["source_nat"]:
                vpccidr = cmdline.get_vpccidr()
                self.fw.append(
                    ["filter", "", "-A FORWARD -s %s ! -d %s -j ACCEPT" % (vpccidr, vpccidr)])
                self.fw.append(
                    ["nat", "", "-A POSTROUTING -j SNAT -o %s --to-source %s" % (self.dev, self.address['public_ip'])])
Example #11
def remove(name):
    """
    Remove a snippet with a given name
    If there is no such snippet, return '404: Snippet Not Found'.
    """
    logging.error("FIXME: Unimplemented - remove({!r}".format(name))
    return ""
Example #12
def gather(suffix, options):
    url = options.get("url")
    if url is None:
        logging.warn("A --url is required. (Can be a local path.)")
        exit(1)

    # remote URL
    if url.startswith("http:") or url.startswith("https:"):
        # Though it's saved in cache/, it will be downloaded every time.
        remote_path = os.path.join(utils.cache_dir(), "url.csv")

        try:
            response = requests.get(url)
            utils.write(response.text, remote_path)
        except Exception:
            logging.error("Remote URL not downloaded successfully.")
            print(utils.format_last_exception())
            exit(1)

    # local path
    else:
        remote_path = url

    for domain in utils.load_domains(remote_path):
        yield domain
Example #13
    def call(self, function, params=None):
        self.requestPerMinute += 1
        now = datetime.utcnow()

        if self.requestPerMinute >= self.requestLimit:
            waittime = 60 - now.second
            logging.warning("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
            time.sleep(waittime)
            now = datetime.utcnow()

        if self.checkMinute != now.minute:
            self.requestPerMinute = 0
            self.checkMinute = now.minute

        payload = ''
        try:
            p = "" if not params else '?' + "&".join(
                ["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
            url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
            logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))
            request = urllib2.Request(url, None, self.baseConfig["headers"])
            stream = urllib2.urlopen(request)
            payload = stream.read()
            data = json.loads(payload)
            if isinstance(data, dict) and 'ruid' in data:
                logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
                return None
            return data

        except Exception as e:
            logging.error('Error: {0} Context: {1}'.format(e, payload))
            return None
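The throttling logic above can be factored into a small helper. A sketch under the same assumption (sleep until the minute rolls over once the per-minute limit is hit):

import time
import logging
from datetime import datetime

class MinuteRateLimiter(object):
    """Sketch: block once `limit` calls land within one wall-clock minute."""

    def __init__(self, limit):
        self.limit = limit
        self.count = 0
        self.minute = datetime.utcnow().minute

    def tick(self):
        now = datetime.utcnow()
        if now.minute != self.minute:
            self.minute = now.minute
            self.count = 0
        self.count += 1
        if self.count >= self.limit:
            wait = 60 - now.second
            logging.warning("Rate limit hit, sleeping %d sec", wait)
            time.sleep(wait)

Each call() would then start with limiter.tick() instead of managing the counters inline.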
Example #14
	def get(self):
		
		alertFlag = None
		dynamicFlag = True
		userID = str(self.user_info['user_id'])
		alertHTML = ""
		q = Alert.all()
		q.filter('userID =', userID)
		for alert in q:
			
			alertFlag=True
			if alert.trafficAlert == 'email':
				contact = alert.email
			else:
				contact = "(" + alert.areacode + ")" + " " + alert.prefix + "-" + alert.suffix
					
			alertHTML += "<tr id='row" + str(alert.key()) + "'>"
			alertHTML += "<td><input class='alertCheckbox' type='checkbox' id='" + str(alert.key()) + "'/></td>"
			alertHTML += "<td>" + contact + "</td></tr>"
			
		
		logging.error(alertHTML)
		params = { 'bodyID' : 'alertsPage', 'alertHTML' : alertHTML, 'alerts' : alertFlag, 'dynamic' : dynamicFlag }
		self.render_template('alerts.html',params)
Example #15
 def act(self, force_act=False, action=None, skip_responses=False):
     """
     returns:
         (action, response) tuple.  response type depends on the action that was performed.
     """        
     if not force_act:
         config = ConfigurationAccessor.get_or_create()
         if config and (config.is_tweeting is not None) and (not safe_int(config.is_tweeting)):
             logging.debug("config.is_tweeting is False; hiding")
             return ()
     
     result = []
     responded = False
     if not skip_responses:
         try:
             direct, response = self.respond()
             if (direct or response):
                 # a response to a direct message or mention was generated
                 responded = True
                 if direct:
                     result.append(direct.AsDict())
                 if response:
                     result.append(response.AsDict())
         except Exception, e:
             logging.error(e)
Example #16
def buildLinkBlock( linkDoc, context, dataType ):
	dynamicHTML = "<div class='quotationrContainer'>"
	dynamicHTML += "<p class='quotationrSource'>"
	
	if context == "tags":
		dynamicHTML+= "<a class='quoteSource' href='/users/" + linkDoc.link + "'>" + linkDoc.title + "</a>"
	else:
		dynamicHTML+= "<a class='quoteSource' href='/users/" + linkDoc.name + "/link/" + linkDoc.urlHash + "'>" + linkDoc.title + "</a>"
		
	logging.error(linkDoc.favicon)
	logging.error(linkDoc.link)
	dynamicHTML+= "<a href='" + linkDoc.link + "'><img class='favicon' onerror='if (this.src != &quot;/images/icon16.png&quot;) this.src = &quot;/images/icon16.png&quot;;' width=16 src='" + linkDoc.favicon + "' /></a>"
	dynamicHTML += "</p>"
	tagHTML = ""
	
	
	#if dataType == "tags":
	for tag in linkDoc.tags.split(' '):
		tagHTML += "<a href='/tags/" + tag + "'>" + tag + "</a>"
	
	dynamicHTML += "<div class='quotationrWidget'>"
	
	

	dynamicHTML += "<span class='quotationrTags'>"
	dynamicHTML += tagHTML + "</span>"
	dynamicHTML += "</p>"
	
	dynamicHTML += "</div>"
	dynamicHTML+= "</div>"
	dynamicHTML+= "<hr />"
	return dynamicHTML
Example #17
	def post(self):
		username = self.request.get('quotationrEmail')

		user = self.user_model.get_by_auth_id(username)
		if not user:
			logging.info('Could not find any user entry for username %s', username)
			self.response.out.write('fail:::cant find email')
			return

		user_id = user.get_id()
		token = self.user_model.create_signup_token(user_id)

		verification_url = self.uri_for('verification', type='p', user_id=user_id,
			signup_token=token, _full=True)

		
		logging.error(verification_url)
		
		mail.send_mail(sender="Quotationr <*****@*****.**>",
                to=user.email_address,
                subject="Reset Your Quotationr Password",
                body="Please click the following link to reset your Quotationr password:\n\n " + verification_url)
		
		#self.response.out.write('success:::' + user.email_address)
		self.response.out.write('success:::email sent')
Example #18
File: dist.py Project: louiz/botan
    def content_rewriter():
        for line in contents:

            if target_version != "HEAD":
                match = version_re.match(line)
                if match:
                    name_to_idx = {"major": 0, "minor": 1, "patch": 2}
                    in_tag = int(version_parts[name_to_idx[match.group(1)]])
                    in_file = int(match.group(2))

                    if in_tag != in_file:
                        logging.error(
                            'Version number part "%s" in botan_version.py does not match tag %s'
                            % (match.group(1), target_version)
                        )
                        raise Exception("Bad botan_version.py")

            if line == "release_vc_rev = None\n":
                yield "release_vc_rev = 'git:%s'\n" % (rev_id)
            elif line == "release_datestamp = 0\n":
                yield "release_datestamp = %d\n" % (rel_date)
            elif line == "release_type = 'unreleased'\n":
                if args[0] == "snapshot":
                    yield "release_type = 'snapshot'\n"
                else:
                    yield "release_type = 'released'\n"
            else:
                yield line
Example #19
  def submit_job_description(self, job):
    """Creates and excutes a job request."""
    request = dataflow.DataflowProjectsLocationsJobsCreateRequest()
    request.projectId = self.google_cloud_options.project
    request.location = self.google_cloud_options.region
    request.job = job.proto

    try:
      response = self._client.projects_locations_jobs.Create(request)
    except exceptions.BadStatusCodeError as e:
      logging.error('HTTP status %d trying to create job'
                    ' at dataflow service endpoint %s',
                    e.response.status,
                    self.google_cloud_options.dataflow_endpoint)
      logging.fatal('details of server error: %s', e)
      raise
    logging.info('Create job: %s', response)
    # The response is a Job proto with the id for the new job.
    logging.info('Created job with id: [%s]', response.id)
    logging.info(
        'To access the Dataflow monitoring console, please navigate to '
        'https://console.developers.google.com/project/%s/dataflow/job/%s',
        self.google_cloud_options.project, response.id)

    return response
Example #20
    def generateCreateFailedReports(self, createFailedJobs):
        """
        _generateCreateFailedReports_

        Create and store FWJR for the jobs that failed on creation
        leaving meaningful information about what happened with them
        """
        if not createFailedJobs:
            return

        fjrsToSave = []
        for failedJob in createFailedJobs:
            report = Report()
            defaultMsg = "There is a condition which assures that this job will fail if it's submitted"
            report.addError("CreationFailure", 99305, "CreationFailure", failedJob.get("failedReason", defaultMsg))
            jobCache = failedJob.getCache()
            try:
                fjrPath = os.path.join(jobCache, "Report.0.pkl")
                report.save(fjrPath)
                fjrsToSave.append({"jobid": failedJob["id"], "fwjrpath": fjrPath})
                failedJob["fwjr"] = report
            except Exception:
                logging.error("Something went wrong while saving the report for  job %s" % failedJob["id"])

        myThread = threading.currentThread()
        self.setFWJRPath.execute(binds=fjrsToSave, conn=myThread.transaction.conn, transaction=True)

        return
Example #21
    def clean(self):
        """this form must always be valid
        should use defaults if the data is incomplete
        or invalid"""
        if self._errors:
            #since the form is always valid, clear the errors
            logging.error(str(self._errors))
            self._errors = {}

        in_data = self.get_pruned_data()
        out_data = dict()
        if ('answer' in in_data) ^ ('comment' in in_data):
            out_data['is_permalink'] = True
            out_data['show_page'] = None
            out_data['answer_sort_method'] = 'votes'
            out_data['show_comment'] = in_data.get('comment', None)
            out_data['show_answer'] = in_data.get('answer', None)
        else:
            out_data['is_permalink'] = False
            out_data['show_page'] = in_data.get('page', 1)
            out_data['answer_sort_method'] = in_data.get(
                                                    'sort',
                                                    self.default_sort_method
                                                )
            out_data['show_comment'] = None
            out_data['show_answer'] = None
        self.cleaned_data = out_data
        return out_data
Example #22
def creatorProcess(work, jobCacheDir):
    """
    _creatorProcess_

    Creator work areas and pickle job objects
    """
    createWorkArea  = CreateWorkArea()

    try:
        wmbsJobGroup = work.get('jobGroup')
        workflow     = work.get('workflow')
        wmWorkload   = work.get('wmWorkload')
        wmTaskName   = work.get('wmTaskName')
        sandbox      = work.get('sandbox')
        owner        = work.get('owner')
        ownerDN      = work.get('ownerDN',None)
        ownerGroup   = work.get('ownerGroup','')
        ownerRole    = work.get('ownerRole','')
        scramArch    = work.get('scramArch', None)
        swVersion    = work.get('swVersion', None)
        agentNumber  = work.get('agentNumber', 0)

        if ownerDN is None:
            ownerDN = owner

        jobNumber    = work.get('jobNumber', 0)
    except KeyError, ex:
        msg =  "Could not find critical key-value in work input.\n"
        msg += str(ex)
        logging.error(msg)
        raise JobCreatorException(msg)
Example #23
    def get(self):
        settings = get_server_settings()
        secret = self.request.headers.get("X-Nuntiuz-Secret", None)
        if secret != settings.jabberSecret:
            logging.error("Received unauthenticated apple certificate request, ignoring ...")
            return
        app_id = self.request.get("id")
        if not app_id:
            return
        app = get_app_by_id(app_id)
        if not app:
            return

        if app.apple_push_cert_valid_until < now() + 30 * DAY:
            send_mail(settings.dashboardEmail,
                      settings.supportWorkers,
                      "The APN cert of %s is about to expire" % app_id,
                      "The APN cert of %s is valid until %s GMT" % (app_id, time.ctime(app.apple_push_cert_valid_until)))
        if app.apple_push_cert_valid_until < now() + 15 * DAY:
            logging.error("The APN cert of %s is valid until %s GMT" % (app_id, time.ctime(app.apple_push_cert_valid_until)))

        result = json.dumps(dict(cert=app.apple_push_cert, key=app.apple_push_key, valid_until=app.apple_push_cert_valid_until))
        self.response.headers['Content-Type'] = 'application/binary'
        _, data = encrypt_for_jabber_cloud(secret, result)
        self.response.write(data)
Example #24
 def virsh_capabilities(option):
     cmd = "virsh capabilities  %s" % option
     cmd_result = utils.run(cmd, ignore_status=True)
     logging.info("Output: %s", cmd_result.stdout.strip())
     logging.error("Error: %s", cmd_result.stderr.strip())
     logging.info("Status: %d", cmd_result.exit_status)
     return cmd_result.exit_status, cmd_result.stdout.strip()
Example #25
	def do_POST(self):
		if self.headers['content-type'] != CONTENT_TYPE:
			self.send_error(400, 'bad content type')
			self.end_headers()
			return
		try:
			content_length = int(self.headers['content-length'])
			content = self.rfile.read(content_length).decode()
			data = json.loads(content)
		except ValueError as err:
			self.send_error(400, 'bad json', str(err))
			self.end_headers()
			return
		if not isinstance(data, dict) or '_token' not in data:
			self.send_error(401, 'missing api token')
			self.end_headers()
			return
		if data.pop('_token') not in self.server.token:
			self.send_error(401, 'invalid api token')
			self.end_headers()
			return
		try:
			self.server.handle(**data)
		except Exception as err:
			logging.error('{}: {}'.format(type(err).__name__, err))
			self.send_error(400, 'bad parameters')
		else:
			self.send_response(201, 'value received')
		self.end_headers()
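A client-side sketch for exercising this handler with the standard library. It assumes the server listens on localhost:8000, CONTENT_TYPE is 'application/json', and 'secret-token' is in the server's token list (all three are assumptions):

import json
import urllib.request

payload = json.dumps({'_token': 'secret-token', 'value': 42}).encode()
req = urllib.request.Request(
    'http://localhost:8000/',
    data=payload,
    headers={'content-type': 'application/json'},  # must match CONTENT_TYPE
)
with urllib.request.urlopen(req) as resp:
    print(resp.status, resp.reason)  # expect: 201 value received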
Example #26
    def test (self):
        try:
            if self.nmctype == "namecoind":
                res = self.callRPC ("getinfo", [])
                vers = res["version"]
                
                v3 = vers % 100
                vers = vers / 100
                v2 = vers % 100
                vers = vers / 100
                v1 = vers
                if v3 == 0:
                  versStr = "0.%d.%d" % (v1, v2)
                else:
                  versStr = "0.%d.%d.%d" % (v1, v2, v3)
                return ('success',  tr._translate("MainWindow",'Success!  Namecoind version %1 running.').arg(unicode(versStr)) )

            elif self.nmctype == "nmcontrol":
                res = self.callRPC ("data", ["status"])
                prefix = "Plugin data running"
                if ("reply" in res) and res["reply"][:len(prefix)] == prefix:
                    return ('success', tr._translate("MainWindow",'Success!  NMControl is up and running.'))

                logger.error("Unexpected nmcontrol reply: %s", res)
                return ('failed',  tr._translate("MainWindow",'Couldn\'t understand NMControl.'))

            else:
                assert False

        except Exception:
            logger.exception("Namecoin connection test failure")
            return ('failed', "The connection to namecoin failed.")
Example #27
    def queryHTTP (self, data):
        result = None

        try:
            self.con.putrequest("POST", "/")
            self.con.putheader("Connection", "Keep-Alive")
            self.con.putheader("User-Agent", "bitmessage")
            self.con.putheader("Host", self.host)
            self.con.putheader("Content-Type", "application/json")
            self.con.putheader("Content-Length", str(len(data)))
            self.con.putheader("Accept", "application/json")
            authstr = "%s:%s" % (self.user, self.password)
            self.con.putheader("Authorization", "Basic %s" % base64.b64encode (authstr))
            self.con.endheaders()
            self.con.send(data)
            try:
                resp = self.con.getresponse()
                result = resp.read()
                if resp.status != 200:
                    raise Exception("Namecoin returned status %i: %s" % (resp.status, resp.reason))
            except:
                logger.error("HTTP receive error")
        except:
            logger.error("HTTP connection error", exc_info=True)

        return result
Example #28
def local(command):
    """
    Runs a shell command locally.

    Intended to be a close-enough dropin replacement for Fabric's local() command.
    """
    # save the current dir
    original_directory = os.getcwd()

    # switch to wherever the lcd context manager tells us to be
    logging.debug("Changing directory to [%s]" % lcd_directory)
    os.chdir(lcd_directory)
    try:
        # run the command
        logger = logging.getLogger("exec")
        logger.debug("Running: [%s]" % command)
        output = []
        p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
        for line in iter(p.stdout.readline, ""):
            output.append(line.rstrip())
            logger.debug(line.rstrip())

        p.communicate()
        if p.returncode != 0:
            logging.error(output)
            raise Exception("Return code was non-zero for command [%s]" % command)
        return output
    finally:
        # go back to the old directory
        logging.debug("Changing directory back to [%s]" % original_directory)
        os.chdir(original_directory)
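The core pattern here — stream a subprocess's combined stdout/stderr line by line, log as you go, and fail loudly on a non-zero exit — as a self-contained Python 3 sketch (the original is written against Python 2's byte-string readline sentinel):

import logging
import subprocess

def run_streaming(command):
    """Sketch: run `command` in a shell and log each output line."""
    output = []
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, shell=True, text=True)
    for line in iter(p.stdout.readline, ''):
        output.append(line.rstrip())
        logging.debug(line.rstrip())
    p.wait()
    if p.returncode != 0:
        logging.error(output)
        raise RuntimeError(
            "Return code was non-zero for command [%s]" % command)
    return output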
Example #29
 def plus(self,start,t):
     '''@return start time + t work time (positive or negative)'''
     start=datetimef(start,self.start)
     if not self.isworktime(start):
         logging.error('%s is not in worktime' % start)
         raise ValueError('%s is not in worktime' % start)
     days=timedelta_div(t,self.delta)
     res=start
     while days>=1:
         res=self.nextworkday(res)
         days=days-1
     while days<=-1:
         res=self.prevworkday(res)
         days=days+1
     
     remaining=timedelta_mul(self.delta,days) #less than one day of work
     day=res.date()
     start=datetimef(day,self.start)
     end=datetimef(day,self.end)
     if (res+remaining)<start: # skip to previous day
         remaining=(res+remaining)-start #in full time
         res=datetimef(self.prevworkday(day),self.end)
     if (res+remaining)>end: # skip to next day
         remaining=(res+remaining)-end #in full time
         res=datetimef(self.nextworkday(day),self.start)
     return res+remaining
Example #30
def retrieveJobSplitParams(wmWorkload, task):
    """
    _retrieveJobSplitParams_

    Retrieve job splitting parameters from the workflow.  The way this is
    set up currently sucks; we have to know all the job splitting parameters
    up front.  The following are currently supported:
        files_per_job
        min_merge_size
        max_merge_size
        max_merge_events
    """


    # This function has to find the WMSpec, and get the parameters from the spec
    # I don't know where the spec is, but I'll have to find it.
    # I don't want to save it in each workflow area, but I may have to

    if not wmWorkload:
        logging.error("Could not find wmWorkload for splitting")
        return {"files_per_job": 5}
    task = wmWorkload.getTaskByPath(task)
    if not task:
        return {"files_per_job": 5}
    else:
        return task.jobSplittingParameters()
Example #31
    def __init__(self, stacking_velocity_fn, ni=50):
        '''
        Class for computing interval velocities using Dix formula, based on provided
        stacking velocities.

        :param stacking_velocity_fn: stacking velocity file name
        :param ni: number of time intervals in each velocity profile; a larger value
            produces a smoother model at the expense of increased computational
            cost. This value should be ideally ~2x the average number of intervals
            in the stacking velocity file.
        '''

        f = None
        try:
            f = open(stacking_velocity_fn)
        except Exception as err:
            print(('Failed to read %s' % (stacking_velocity_fn)))
            logging.error(traceback.format_exc())
            exit(-1)

        self._ni = ni

        # read stacking velocity file and extract time intervals and
        # corresponding velocities for each CDP location
        self._cdps = []
        self._times = []
        self._vels = []
        tempList = []
        for line in f:
            if ('HANDVEL' in line):
                cdp = line.split()[1]
                self._cdps.append(cdp)
                if (len(tempList)):
                    tempList = numpy.int_(numpy.array(tempList))
                    ts = tempList[::2] / 1e3
                    vs = tempList[1::2]
                    # extend beyond given time range
                    ts = numpy.append(ts, 1e6)
                    vs = numpy.append(vs, vs[-1])

                    self._times.append(ts)
                    self._vels.append(vs)
                    tempList = []
            elif ('END' in line):
                tempList = numpy.int_(numpy.array(tempList))
                ts = tempList[::2] / 1e3
                vs = tempList[1::2]
                # extend beyond given time range
                ts = numpy.append(ts, 1e6)
                vs = numpy.append(vs, vs[-1])
                
                self._times.append(ts)
                self._vels.append(vs)
            else:
                if ('*' not in line):
                    items = line.split()
                    for i in items:
                        tempList.append(i)
        # end for
        f.close()
        self._cdps = numpy.int_(self._cdps)

        # Create Kd-Tree for CDP queries
        cdpArray = numpy.expand_dims(self._cdps, 1)
        self._cdp_tree = cKDTree(cdpArray)

        # generate depth model
        self._generateDepthModel()
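For reference, the Dix conversion this class ultimately performs turns stacking (RMS) velocities into interval velocities. A minimal numpy sketch of the formula itself, separate from the class's file parsing:

import numpy

def dix_interval_velocities(times, vrms):
    """Sketch of the Dix formula: between consecutive two-way times
    t1 < t2 with RMS velocities v1, v2, the interval velocity is
    sqrt((v2^2*t2 - v1^2*t1) / (t2 - t1))."""
    times = numpy.asarray(times, dtype=float)
    vrms = numpy.asarray(vrms, dtype=float)
    num = vrms[1:] ** 2 * times[1:] - vrms[:-1] ** 2 * times[:-1]
    den = times[1:] - times[:-1]
    return numpy.sqrt(num / den)

# e.g. dix_interval_velocities([0.5, 1.0, 1.5], [1500.0, 1800.0, 2100.0])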
Example #32
    }  # mapping of level names to logging levels

    def __init__(
        self,
        filename='./logs/log',
        level='info',
        fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'
    ):
        self.logger = logging.getLogger()
        self.logger.setLevel(self.level_relations.get(level))  # set the log level
        format_str = logging.Formatter(fmt)  # set the log format

        # Create a handler that writes to the log file
        fh = logging.FileHandler(filename, encoding='utf-8')
        fh.setLevel(self.level_relations.get(level))
        fh.setFormatter(format_str)  # format used in the log file
        # Also log to the console
        sh = logging.StreamHandler()  # print to the screen
        sh.setLevel(logging.INFO)  # console level is INFO
        sh.setFormatter(format_str)  # format shown on the console

        # Both handlers are attached to the root logger, which is why the
        # module-level logging.* calls below are routed through them.
        self.logger.addHandler(sh)
        self.logger.addHandler(fh)


if __name__ == '__main__':
    log = Logger(filename='./logs/log', level='debug')
    logging.info("111")
    logging.debug("章")
    logging.error("333")
Example #33
def create_level_1(space, static_sprite_list, dynamic_sprite_list, bg_sprite_list, fg_sprite_list):
    """ Create level one. """
    create_floor(space, static_sprite_list)

    # Add hills
    create_hill(space, static_sprite_list, -constants.SPRITE_SIZE * 40, constants.SPRITE_SIZE, 3)
    create_hill(space, static_sprite_list, constants.SPRITE_SIZE * 34, constants.SPRITE_SIZE, 4)

    # create_walls(space, static_sprite_list)
    create_platform(space, static_sprite_list, 200, constants.SPRITE_SIZE * 3, 3)
    create_platform(space, static_sprite_list, 500, constants.SPRITE_SIZE * 6, 3)
    create_platform(space, static_sprite_list, 200, constants.SPRITE_SIZE * 9, 3)
    create_platform(space, static_sprite_list, -300, constants.SPRITE_SIZE * 3, 3)
    create_platform(space, static_sprite_list, -600, constants.SPRITE_SIZE * 6, 2)

    # Far left platforms
    create_platform(space, static_sprite_list, -900, constants.SPRITE_SIZE * 9, 3)
    create_platform(space, static_sprite_list, -1200, constants.SPRITE_SIZE * 3, 1)
    create_platform(space, static_sprite_list, -1600, constants.SPRITE_SIZE * 6, 2)
    create_platform(space, static_sprite_list, -1800, constants.SPRITE_SIZE * 13, 4)
    create_platform(space, static_sprite_list, -2100, constants.SPRITE_SIZE * 9, 3)

    create_platform(space, static_sprite_list, -840, constants.SPRITE_SIZE * 13, 1)
    create_platform(space, static_sprite_list, 0, constants.SPRITE_SIZE * 13, 3)

    # Add some more to the right
    create_platform(space, static_sprite_list, 1040, constants.SPRITE_SIZE * 4, 3)
    create_platform(space, static_sprite_list, 1440, constants.SPRITE_SIZE * 6, 2)
    create_platform(space, static_sprite_list, 1840, constants.SPRITE_SIZE * 8, 1)
    create_platform(space, static_sprite_list, 800, constants.SPRITE_SIZE * 8, 1)
    create_platform(space, static_sprite_list, 2300, constants.SPRITE_SIZE * 10, 1)

    # Add decorations
    decorate_cactus(bg_sprite_list, 0, constants.SPRITE_SIZE, 12) # Cacti along ground
    decorate_cactus_large(bg_sprite_list, 0, constants.SPRITE_SIZE + 30 , 4)
    decorate_cactus_tiny(bg_sprite_list, 0, constants.SPRITE_SIZE - 20, 4)
    decorate_grass(bg_sprite_list, 0, constants.SPRITE_SIZE, 20)
    decorate_rock(fg_sprite_list, 0, constants.SPRITE_SIZE, 3)
    decorate_rock_small(bg_sprite_list, 0, constants.SPRITE_SIZE - 20, 10)
    decorate_clouds(bg_sprite_list, 20)

    # Create the stacks of boxes based on number of running pods or create random ones if offline mode
    # print(constants.OFFLINE_MODE)
    if constants.OFFLINE_MODE:
        CRATE_COUNT = constants.OFFLINE_CRATE_COUNT
    else:
        logging.info("Attempting to connect to Kubernetes API host..")
        try:
            CRATE_COUNT = count_pods()
        except Exception:
            logging.error("Unable to connect to Kubernetes API host")
            logging.error("Check your Kubernetes environment/kubeconfig")
            logging.error("Or feel free to start with --offline yes set")
            sys.exit(1)

    logging.info("Creating %s crates", int(CRATE_COUNT * constants.CONTAINER_FACTOR))
    
    # Create crates in random locations, based on number of pods * CONTAINER_FACTOR
    i = 0
    while i < int(CRATE_COUNT * constants.CONTAINER_FACTOR):
        x = random.randrange(-2000, 2200)
        y = random.randrange(200, 7000) # Drop crates in from random heights
        # print(x)
        # print(y)    
        sprite = PymunkSprite("./images/tiles/boxCrate_double.png", x, y, scale=constants.SPRITE_SCALING, friction=0.6)
        dynamic_sprite_list.append(sprite)
        space.add(sprite.body, sprite.shape)
        # fall_sound = arcade.load_sound("./sounds/wooddrop.wav")
        # arcade.play_sound(fall_sound)
        i += 1

    # logging.info("Number of crates created: %s", len(dynamic_sprite_list))
Example #34
    def get_lv2_controllers_dict(self):
        logging.info("Getting Controller List from LV2 Plugin ...")
        output = self.proc_cmd("\info_controls")
        zctrls = OrderedDict()
        for line in output.split("\n"):
            parts = line.split(" => ")
            if len(parts) == 2:
                symbol = parts[0]
                try:
                    info = json.loads(parts[1])

                    #If there is points info ...
                    if len(info['points']) > 1:
                        labels = []
                        values = []
                        for p in info['points']:
                            labels.append(p['label'])
                            values.append(p['value'])
                        try:
                            val = info['value']
                        except KeyError:
                            val = labels[0]
                        zctrls[symbol] = zynthian_controller(
                            self, symbol, info['label'], {
                                'graph_path': info['index'],
                                'value': val,
                                'labels': labels,
                                'ticks': values,
                                'value_min': values[0],
                                'value_max': values[-1],
                                'is_toggle': info['is_toggle'],
                                'is_integer': info['is_integer']
                            })

                    #If it's a normal controller ...
                    else:
                        r = info['max'] - info['min']
                        if info['is_integer']:
                            if r == 1 and info['is_toggle']:
                                if info['value'] == 0:
                                    val = 'off'
                                else:
                                    val = 'on'
                                zctrls[symbol] = zynthian_controller(
                                    self, symbol, info['label'], {
                                        'graph_path': info['index'],
                                        'value': val,
                                        'labels': ['off', 'on'],
                                        'ticks': [0, 1],
                                        'value_min': 0,
                                        'value_max': 1,
                                        'is_toggle': True,
                                        'is_integer': True
                                    })
                            else:
                                zctrls[symbol] = zynthian_controller(
                                    self, symbol, info['label'], {
                                        'graph_path': info['index'],
                                        'value': int(info['value']),
                                        'value_default': int(info['default']),
                                        'value_min': int(info['min']),
                                        'value_max': int(info['max']),
                                        'is_toggle': False,
                                        'is_integer': True
                                    })
                        else:
                            zctrls[symbol] = zynthian_controller(
                                self, symbol, info['label'], {
                                    'graph_path': info['index'],
                                    'value': info['value'],
                                    'value_default': info['default'],
                                    'value_min': info['min'],
                                    'value_max': info['max'],
                                    'is_toggle': False,
                                    'is_integer': False
                                })

                #If control info is not OK
                except Exception as e:
                    logging.error(e)

        return zctrls
Example #35
    def run(self):
        # Every time through the loop, we check for queued commands
        # received via MQTT. If there are any, run those. If not,
        # just fetch status. Then rest a bit between runs.
        # Note: This should be THE ONLY place that actually talks
        # to the Anova.
        # Every time through the loop we increment status_count
        status_count = 0
        # How long to sleep at the end of the loop (in seconds)
        loop_delay = 0.1
        # Send status after this many iterations through the loop
        status_max = 50
        while True:
            next_command = None
            if (not self._command_queue.empty()):
                try:
                    next_command = self._command_queue.get_nowait()
                except Empty:
                    # This shouldn't happen with only one queue
                    # consumer, but catch it and move on if so.
                    pass

            if (next_command is not None):
                logging.debug("Next Command: {}".format(next_command))
                sys.stdout.write('%s '% datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                sys.stdout.write("Next Command: {}".format(next_command))
                sys.stdout.write('\n')
                if (next_command[0] == 'run'):
                    if (next_command[1] == 'heat'):
                        self._anova.start_anova()
                    elif (next_command[1] == 'off'):
                        self._anova.stop_anova()
                    else:
                        logging.warning('Unknown mode for run command: {}'.format(next_command[1]))
                        sys.stdout.write('%s '% datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                        sys.stdout.write('Unknown mode for run command: {}'.format(next_command[1]))
                        sys.stdout.write('\n')
                elif (next_command[0] == 'temp'):
                    try:
                        target_temp = float(next_command[1])
                    except ValueError:
                        # Couldn't parse it, don't care
                        target_temp = 0
                    # Bounds checking, yes these are hard coded
                    # (based on Fahrenheit or Celsius!) from the Anova website
                    if self._status.temp_unit == "f":
                        if (target_temp >= 77 and target_temp <= 210):
                            self._anova.set_temp(target_temp)
                    elif self._status.temp_unit == "c":
                        if (target_temp >= 20 and target_temp <= 99):
                            self._anova.set_temp(target_temp)
                elif (next_command[0] == 'timer_run'):
                    if (next_command[1] == 'heat'):
                        self._anova.start_anova() # Anova must be started before starting timer so forcing start to be safe
                        self._anova.start_timer()
                    elif (next_command[1] == 'off'):
                        self._anova.stop_timer()
                    else:
                        logging.warning('Unknown mode for timer_state command: {}'.format(next_command[1]))
                        sys.stdout.write('%s '% datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                        sys.stdout.write('Unknown mode for timer_state command: {}'.format(next_command[1]))
                        sys.stdout.write('\n')
                elif (next_command[0] == 'timer'):
                    try:
                        target_timer = int(next_command[1])
                    except ValueError:
                        # Couldn't parse it, don't care
                        target_timer = 0
                    self._anova.set_timer(target_timer)
                else:
                    logging.error('Unknown command received: {}'.format(next_command[0]))
                    sys.stdout.write('%s '% datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                    sys.stdout.write('Unknown command received: {}'.format(next_command[0]))
                    sys.stdout.write('\n')

            if (status_count >= status_max):
                self.fetch_status()
                json_status = json.dumps(self._status.__dict__, sort_keys=True)
                json_timer_status = json.dumps(self._timer_status.__dict__, sort_keys=True)

                self._mqtt.publish_message(self._config.get('mqtt', 'status_topic'), json_status)
                self._mqtt.publish_message(self._config.get('mqtt', 'status_timer'), json_timer_status)
                status_count = 0
            else:
                status_count = status_count+1

            time.sleep(loop_delay)
Example #36
def load_stats_data():
    my_redcaps = []
    df = pd.DataFrame()

    # Load assr data
    logging.debug('loading stats data')

    try:
        # Read inputs yaml as dictionary
        with open(REDCAP_FILE, 'rt') as file:
            redcap_data = yaml.load(file, yaml.SafeLoader)
    except EnvironmentError:
        logging.info('REDCap settings file not found, not loading stats')
        df = pd.DataFrame(columns=static_columns())
        return df

    api_url = redcap_data['api_url']

    with dax.XnatUtils.get_interface() as xnat:
        my_projects = utils.get_user_favorites(xnat)

    # Filter the list of redcaps based on our project access
    for r in redcap_data['projects']:
        name = r['name']

        try:
            (proj, proc, res) = parse_redcap_name(name)
        except ValueError:
            continue

        if (proj in my_projects):
            my_redcaps.append(r)

    # Load data from each redcap
    icount = len(my_redcaps)
    for i, r in enumerate(my_redcaps):
        name = r['name']
        api_key = r['key']
        (proj, proc, res) = parse_redcap_name(name)
        logging.info('{}/{} loading redcap:{}'.format(i+1, icount, name))
        try:
            cur_df = load_redcap_stats(api_url, api_key)
            df = pd.concat([df, cur_df], ignore_index=True, sort=False)
        except Exception as err:
            logging.error('error exporting redcap:{}:{}'.format(name, err))
            import traceback
            traceback.print_exc()
            continue

    # Rename columns
    df.rename(columns=STATS_RENAME, inplace=True)

    # Filter out columns we don't want by keeping intersection
    _static = static_columns()
    _var = get_vars()
    _keep = df.columns
    _keep = [x for x in _keep if (x in _var or x in _static)]
    #print('_keep', _keep)
    df = df[_keep]

    # return the stats data
    logging.info('loaded {} stats'.format(len(df)))
    return df
Example #37
import logging
from pathlib import Path
import pandas as pd

import neuropsymodelcomparison as npmc

if __name__ == "__main__":
    import sys

    # Retrieve data from database.
    devices = npmc.dataio.get_db_table('devices')
    users = npmc.dataio.get_db_table('users')
    blocks = npmc.dataio.get_db_table('circletask_blocks')
    trials = npmc.dataio.get_db_table('circletask_trials')

    # When we didn't collect all data, stop here.
    if True in [df.empty for df in [devices, users, blocks, trials]]:
        logging.error("Could not retrieve all raw data from database.")
        sys.exit(1)

    # Save to files.
    raw_data_folder = Path.cwd() / "data/raw"

    devices.to_csv(raw_data_folder / 'devices.csv')
    users.to_csv(raw_data_folder / 'users.csv')
    blocks.to_csv(raw_data_folder / 'blocks.csv')
    trials.to_csv(raw_data_folder / 'trials.csv')

    logging.info(f"Written raw data to {raw_data_folder.resolve()}")
Example #38
import logging
import multiprocessing
import Queue
import signal
import socket
import string
import struct
import sys
import time
import traceback

from killerbee import *
try:
    from scapy.all import Dot15d4, Dot15d4Beacon
except ImportError:
    log_message = 'This Requires Scapy (Dot15d4) To Be Installed.'
    print log_message
    logging.error(log_message)
    from sys import exit
    exit(-1)


# TODO: We're assuming that the device can inject
# ug... so many parameters
class Scanner(multiprocessing.Process):
    def __init__(self, device, devstring, channel, channels, verbose,
                 currentGPS, kill, output, scanning_time, capture_time):
        multiprocessing.Process.__init__(self)
        self.dev = device  # KB device
        self.devstring = devstring  # Name of the device (for logging)
        self.channels = channels  # Shared queue of channels
        self.channel = channel  # Shared memory of current channel
        self.verbose = verbose  # Verbose flag
Example #39
            s_image = image_list[randint(0, len(image_list)-1)]

            dm_sender(single_minion_id, f'{single_twt}   you might like this: " https://cool-giveaways.weebly.com/')

            gls.sleep_time()

            tweet_sender(single_handle=single_follower, single_tweet=single_twt, single_hashtag=single_ht)

            gls.sleep_time()

            twitter_user_follower(single_handle=single_follower)

            gls.sleep_time()

            custom_replier()

            gls.sleep_time()

            image_tweeter(single_image=s_image, single_tweet=gls.usa_giveaway, single_hashtag=single_ht)

            gls.sleep_time()

            single_tweet_replier(single_tweet_text=single_twt, tweet_id=single_twt_id)

            gls.sleep_time()

        except Exception as ws:
            logging.error('Error occurred ' + str(ws))

Example #40
    def run(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        log_message = "Scanning with {}".format(self.devstring)
        if self.verbose:
            print log_message
        logging.debug(log_message)

        beacon = "\x03\x08\x00\xff\xff\xff\xff\x07"  # beacon frame
        beaconp1 = beacon[0:2]  # beacon part before seqnum field
        beaconp2 = beacon[3:]  # beacon part after seqnum field
        # TODO: Do we want to keep sequence numbers unique across devices?
        seqnum = 0  # seqnum to use (will cycle)

        while (1):
            if self.kill.is_set():
                log_message = "{}: Kill event caught".format(self.devstring)
                if self.verbose:
                    print log_message
                logging.debug(log_message)
                return

            # Try to get the next channel, if there aren't any, sleep and try again
            # It shouldn't be empty unless there are more devices than channels
            try:
                self.channel.value = self.channels.get(False)
            except Queue.Empty:
                time.sleep(1)
                continue

            # Change channel
            try:
                self.dev.set_channel(self.channel.value)
            except Exception as e:
                log_message = "%s: Failed to set channel to %d (%s)." % (
                    self.devstring, self.channel.value, e)
                if self.verbose:
                    print log_message
                logging.error(log_message)
                return

            # Craft and send beacon
            if seqnum > 255:
                seqnum = 0
            beaconinj = beaconp1 + "%c" % seqnum + beaconp2
            seqnum += 1
            log_message = "{}: Injecting a beacon request on channel {}".format(
                self.devstring, self.channel.value)
            if self.verbose:
                print log_message
            logging.debug(log_message)
            try:
                self.dev.inject(beaconinj)
            except Exception, e:
                log_message = "%s: Unable to inject packet (%s)." % (
                    self.devstring, e)
                if self.verbose:
                    print log_message
                logging.error(log_message)
                return

            # Listen for packets
            endtime = time.time() + self.scanning_time
            try:
                while (endtime > time.time()):
                    # Get any packets (blocks for 100 usec)
                    packet = self.dev.pnext()
                    if packet is not None:
                        log_message = "{}: Found a frame on channel {}".format(
                            self.devstring, self.channel.value)
                        if self.verbose:
                            print log_message
                        logging.debug(log_message)
                        pdump = self.create_pcapdump()
                        self.dump_packet(pdump, packet)
                        self.capture(pdump)
                        break
            except Exception as e:
                log_message = "%s: Error in capturing packets (%s)." % (
                    self.devstring, e)
                if self.verbose:
                    print log_message
                    print traceback.format_exc()
                logging.error(log_message)
                logging.error(traceback.format_exc())
                return

            self.dev.sniffer_off()

            # Add channel back to the queue
            self.channels.put(self.channel.value)
Example #41
def handle_push_notification(user_profile_id: int, missed_message: Dict[str, Any]) -> None:
    """
    missed_message is the event received by the
    zerver.worker.queue_processors.PushNotificationWorker.consume function.
    """
    if not push_notifications_enabled():
        return
    user_profile = get_user_profile_by_id(user_profile_id)
    if not (receives_offline_push_notifications(user_profile) or
            receives_online_notifications(user_profile)):
        return

    try:
        (message, user_message) = access_message(user_profile, missed_message['message_id'])
    except JsonableError:
        if ArchivedMessage.objects.filter(id=missed_message['message_id']).exists():
            # If the cause is a race with the message being deleted,
            # that's normal and we have no need to log an error.
            return
        logging.error(
            "Unexpected message access failure handling push notifications: %s %s",
            user_profile.id, missed_message['message_id'],
        )
        return

    if user_message is not None:
        # If the user has read the message already, don't push-notify.
        #
        # TODO: It feels like this is already handled when things are
        # put in the queue; maybe we should centralize this logic with
        # the `zerver/tornado/event_queue.py` logic?
        if user_message.flags.read:
            return

        # Otherwise, we mark the message as having an active mobile
        # push notification, so that we can send revocation messages
        # later.
        user_message.flags.active_mobile_push_notification = True
        user_message.save(update_fields=["flags"])
    else:
        # Users should only be getting push notifications into this
        # queue for messages they haven't received if they're
        # long-term idle; anything else is likely a bug.
        if not user_profile.long_term_idle:
            logger.error(
                "Could not find UserMessage with message_id %s and user_id %s",
                missed_message['message_id'], user_profile_id,
            )
            return

    message.trigger = missed_message['trigger']

    apns_payload = get_message_payload_apns(user_profile, message)
    gcm_payload, gcm_options = get_message_payload_gcm(user_profile, message)
    logger.info("Sending push notifications to mobile clients for user %s", user_profile_id)

    if uses_notification_bouncer():
        send_notifications_to_bouncer(user_profile_id,
                                      apns_payload,
                                      gcm_payload,
                                      gcm_options)
        return

    android_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                          kind=PushDeviceToken.GCM))

    apple_devices = list(PushDeviceToken.objects.filter(user=user_profile,
                                                        kind=PushDeviceToken.APNS))

    if apple_devices:
        send_apple_push_notification(user_profile.id, apple_devices,
                                     apns_payload)

    if android_devices:
        send_android_push_notification(android_devices, gcm_payload, gcm_options)
Ejemplo n.º 42
0
def dict_loader():  # loads all downloaded data into respective dictionaries
    try:
        with open(gls.minion_ids_csv, gls.read) as rdr:
            reader = csv.reader(rdr, delimiter=",")
            for single_row in reader:

                minions_dict[single_row[0][:-1]] = single_row[1]  # adding minion data as key value pairs

    except IOError as x:
        print("problem reading the minion_and_ids csv")
        logging.error('Error occurred ' + str(x))
    except Exception as e:
        print("the problem is: ", e)
        logging.error('Error occurred ' + str(e))

    finally:
        print(minions_dict)

    first_line = True

    try:
        with open(gls.downloaded_tweets_csv, gls.read) as rdr:
            reader = csv.reader(rdr, delimiter=",")
            for single_row in reader:
                if first_line:  # this skips the first line (header)
                    first_line = False
                    continue  # skip the rest of this loop iteration

                dld_tweet_dict[single_row[0][:-1]] = single_row[2]  # adding tweet data as key-value pairs

    except IOError as x:
        print("problem reading the downloaded_tweets csv")
        logging.error('Error occurred ' + str(x))
    except Exception as e:
        print("the problem is: ", e)
        logging.error('Error occurred ' + str(e))

    finally:
        print(dld_tweet_dict)

    first_line = True

    try:
        with open(gls.tweets_ids_csv, gls.read) as rdr:
            reader = csv.reader(rdr, delimiter=",")
            for single_row in reader:
                if first_line:  # this skips the first line (header)
                    first_line = False
                    continue  # skip the rest of this loop iteration

                ht_tweet_dict[single_row[0][:-1]] = single_row[1]  # adding tweet data as key-value pairs

    except IOError as x:
        print("problem reading the tweets_&_ids csv")
        logging.error('Error occurred ' + str(x))
    except Exception as e:
        print("the problem is: ", e)
        logging.error('Error occurred ' + str(e))

    finally:
        print(ht_tweet_dict)

    first_line = True

    try:
        with open(gls.follower_ids_csv, gls.read) as rdr:
            reader = csv.reader(rdr, delimiter=",")
            for single_row in reader:
                if first_line:  # this skips the first line (header)
                    first_line = False
                    continue  # skip the rest of this loop iteration

                follower_id_dict[single_row[0][:-1]] = single_row[1]  # adding follower data as key-value pairs

    except IOError as x:
        print("problem reading the tweets_&_ids csv")
        logging.error('Error occurred ' + str(x))
    except Exception as e:
        print("the problem is: ", e)
        logging.error('Error occurred ' + str(e))

    finally:
        print(follower_id_dict)
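
The four blocks above repeat the same read-CSV-into-dict pattern. A minimal refactoring sketch (the helper name and signature are hypothetical, not part of the original module):

import csv
import logging

def load_csv_into_dict(path, value_col, skip_header=True):
    """Read a CSV file into a dict keyed by the first column
    (with its trailing character stripped, as in the code above)."""
    result = {}
    try:
        with open(path) as rdr:
            reader = csv.reader(rdr, delimiter=",")
            if skip_header:
                next(reader, None)  # skip the header row
            for row in reader:
                result[row[0][:-1]] = row[value_col]
    except IOError as x:
        print("problem reading", path)
        logging.error('Error occurred ' + str(x))
    return result

With it, for example, minions_dict = load_csv_into_dict(gls.minion_ids_csv, 1, skip_header=False) and dld_tweet_dict = load_csv_into_dict(gls.downloaded_tweets_csv, 2).
Ejemplo n.º 43
0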
  def processInteractionEvents(self, callerInteractor, eventId, viewWidget):
    abortEvent = False

    # Only allow for slice views
    if viewWidget.className() != "qMRMLSliceWidget":
      return abortEvent

    if eventId == vtk.vtkCommand.LeftButtonPressEvent:
      self.scriptedEffect.saveStateForUndo()

      # Get master volume image data
      import vtkSegmentationCorePython as vtkSegmentationCore
      masterImageData = self.scriptedEffect.masterVolumeImageData()
      selectedSegmentLabelmap = self.scriptedEffect.selectedSegmentLabelmap()

      # Get modifier labelmap
      modifierLabelmap = self.scriptedEffect.defaultModifierLabelmap()

      xy = callerInteractor.GetEventPosition()
      ijk = self.xyToIjk(xy, viewWidget, masterImageData)

      pixelValue = masterImageData.GetScalarComponentAsFloat(ijk[0], ijk[1], ijk[2], 0)      
      
      useSegmentationAsStencil = False
      
      try:

        # This can be a long operation - indicate it to the user
        qt.QApplication.setOverrideCursor(qt.Qt.WaitCursor)

        # Perform thresholding
        floodFillingFilter = vtk.vtkImageThresholdConnectivity()
        floodFillingFilter.SetInputData(masterImageData)
        seedPoints = vtk.vtkPoints()
        origin = masterImageData.GetOrigin()
        spacing = masterImageData.GetSpacing()
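        # Convert the clicked voxel index (IJK) into a physical-space (world) seed point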
        seedPoints.InsertNextPoint(origin[0]+ijk[0]*spacing[0], origin[1]+ijk[1]*spacing[1], origin[2]+ijk[2]*spacing[2])
        floodFillingFilter.SetSeedPoints(seedPoints)

        maskImageData = vtkSegmentationCore.vtkOrientedImageData()
        intensityBasedMasking = self.scriptedEffect.parameterSetNode().GetMasterVolumeIntensityMask()
        segmentationNode = self.scriptedEffect.parameterSetNode().GetSegmentationNode()
        success = segmentationNode.GenerateEditMask(maskImageData,
          self.scriptedEffect.parameterSetNode().GetMaskMode(),
          masterImageData, # reference geometry
          self.scriptedEffect.parameterSetNode().GetSelectedSegmentID(),
          self.scriptedEffect.parameterSetNode().GetMaskSegmentID() if self.scriptedEffect.parameterSetNode().GetMaskSegmentID() else "",
          masterImageData if intensityBasedMasking else None,
          self.scriptedEffect.parameterSetNode().GetMasterVolumeIntensityMaskRange() if intensityBasedMasking else None)
        if success:
          stencil = vtk.vtkImageToImageStencil()
          stencil.SetInputData(maskImageData)
          stencil.ThresholdByLower(0)
          stencil.Update()
          floodFillingFilter.SetStencilData(stencil.GetOutput())
        else:
          logging.error("Failed to create edit mask")
        
        neighborhoodSizeMm = self.neighborhoodSizeMmSlider.value
        floodFillingFilter.SetNeighborhoodRadius(neighborhoodSizeMm,neighborhoodSizeMm,neighborhoodSizeMm)
        floodFillingFilter.SetNeighborhoodFraction(0.5)
        
        if useSegmentationAsStencil:
          stencilFilter = vtk.vtkImageToImageStencil()
          stencilFilter.SetInputData(selectedSegmentLabelmap)
          stencilFilter.ThresholdByLower(0)
          stencilFilter.Update()          
          floodFillingFilter.SetStencilData(stencilFilter.GetOutput())

        pixelValueTolerance = float(self.intensityToleranceSlider.value)
        floodFillingFilter.ThresholdBetween(pixelValue-pixelValueTolerance, pixelValue+pixelValueTolerance)
        
        floodFillingFilter.SetInValue(1)
        floodFillingFilter.SetOutValue(0)
        floodFillingFilter.Update()
        modifierLabelmap.DeepCopy(floodFillingFilter.GetOutput())
      except IndexError:
        logging.error('apply: Failed to threshold master volume!')
      finally:
        qt.QApplication.restoreOverrideCursor() 

      # Apply changes
      self.scriptedEffect.modifySelectedSegmentByLabelmap(modifierLabelmap, slicer.qSlicerSegmentEditorAbstractEffect.ModificationModeAdd)
      abortEvent = True
        
    return abortEvent
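
The flood fill above seeds vtkImageThresholdConnectivity at the clicked voxel and grows through voxels within pixelValue ± tolerance. A standalone sketch of the same filter on a synthetic VTK test image (sizes and threshold values are illustrative):

import vtk

source = vtk.vtkImageEllipsoidSource()      # test image: an ellipsoid of 255s on a background of 0s
source.SetWholeExtent(0, 63, 0, 63, 0, 0)   # small 2D image
source.SetCenter(32, 32, 0)
source.SetRadius(16, 16, 0)
source.SetInValue(255)
source.SetOutValue(0)
source.Update()

seeds = vtk.vtkPoints()
seeds.InsertNextPoint(32, 32, 0)            # seed inside the ellipsoid

flood = vtk.vtkImageThresholdConnectivity()
flood.SetInputData(source.GetOutput())
flood.SetSeedPoints(seeds)
flood.ThresholdBetween(200, 255)            # grow only through voxels near the seed value
flood.SetInValue(1)
flood.SetOutValue(0)
flood.Update()
mask = flood.GetOutput()                    # the connected region reachable from the seed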
      
Ejemplo n.º 44
0
def main():
    log.basicConfig(format="[ %(levelname)s ] %(message)s", level=log.INFO, stream=sys.stdout)
    args = build_argparser().parse_args()
    model_xml = sys.path[0] + "/13.xml"
    model_bin = sys.path[0] + "/13.bin"
    # Plugin initialization for specified device and load extensions library if specified
    log.info("Initializing plugin for {} device...".format(args.device))
    plugin = IEPlugin(device=args.device, plugin_dirs=args.plugin_dir)
    if args.cpu_extension and 'CPU' in args.device:
        plugin.add_cpu_extension(args.cpu_extension)
    # Read IR
    log.info("Reading IR...")
    print(model_xml)
    net = IENetwork(model=model_xml, weights=model_bin)

    if plugin.device == "CPU":
        supported_layers = plugin.get_supported_layers(net)
        not_supported_layers = [l for l in net.layers.keys() if l not in supported_layers]
        if len(not_supported_layers) != 0:
            log.error("Following layers are not supported by the plugin for specified device {}:\n {}".
                      format(plugin.device, ', '.join(not_supported_layers)))
            log.error("Please try to specify cpu extensions library path in demo's command line parameters using -l "
                      "or --cpu_extension command line argument")
            sys.exit(1)
    assert len(net.inputs.keys()) == 1, "Demo supports only single input topologies"
    assert len(net.outputs) == 1, "Demo supports only single output topologies"
    input_blob = next(iter(net.inputs))
    out_blob = next(iter(net.outputs))
    log.info("Loading IR to the plugin...")
    exec_net = plugin.load(network=net, num_requests=2)
    # Read and pre-process input image
    n, c, h, w = net.inputs[input_blob].shape
    del net
    if args.input == 'cam':
        input_stream = 0
    else:
        input_stream = args.input
        assert os.path.isfile(args.input), "Specified input file doesn't exist"
    if args.labels:
        with open(args.labels, 'r') as f:
            labels_map = [x.strip() for x in f]
    else:
        labels_map = None

    cap = cv2.VideoCapture(input_stream)

    cur_request_id = 0
    next_request_id = 1

    log.info("Starting inference in async mode...")
    log.info("To switch between sync and async modes press Tab button")
    log.info("To stop the demo execution press Esc button")
    is_async_mode = True
    render_time = 0
    ret, frame = cap.read()

    print("To close the application, press 'CTRL+C' or any key with focus on the output window")
    person = 0
    while cap.isOpened():
        
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break
        initial_w = cap.get(3)
        initial_h = cap.get(4)
        # Main sync point:
        # in the truly Async mode we start the NEXT infer request, while waiting for the CURRENT to complete
        # in the regular mode we start the CURRENT request and immediately wait for its completion
        inf_start = time.time()
        if is_async_mode:
            in_frame = cv2.resize(next_frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=next_request_id, inputs={input_blob: in_frame})
        else:
            in_frame = cv2.resize(frame, (w, h))
            in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
            in_frame = in_frame.reshape((n, c, h, w))
            exec_net.start_async(request_id=cur_request_id, inputs={input_blob: in_frame})
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            inf_end = time.time()
            det_time = inf_end - inf_start

            # Parse detection results of the current request
            res = exec_net.requests[cur_request_id].outputs[out_blob]
            count = 0
            for obj in res[0][0]:
                # Draw only objects when probability more than specified threshold
                if obj[2] > args.prob_threshold:
                    xmin = int(obj[3] * initial_w)
                    ymin = int(obj[4] * initial_h)
                    xmax = int(obj[5] * initial_w)
                    ymax = int(obj[6] * initial_h)
                    class_id = int(obj[1])
                    # Draw box and label/class_id
                    color = (min(class_id * 12.5, 255), min(class_id * 7, 255), min(class_id * 5, 255))
                    cv2.rectangle(frame, (xmin, ymin), (xmax, ymax), color, 2)
                    det_label = labels_map[class_id] if labels_map else str(class_id)
                    cv2.putText(frame, det_label + ' ' + str(round(obj[2] * 100, 1)) + ' %', (xmin, ymin - 7),
                                cv2.FONT_HERSHEY_COMPLEX, 0.6, color, 1)
                    count = count + 1
            # =============================== Upload whenever the person count changes ===============================
            if person != count:
                person = count
                client.publish(topic,'{ "requestId": "{123}", "reported": {"lng":117.541934,"lat":34.904680,"count":'+str(count)+'} }')
                if person >= MaxPerson:  # check whether the count exceeds the threshold
                    # the data would be uploaded via an HTTP POST here
                    print("Warning! Person count exceeded the threshold!")

            # Draw performance stats
            inf_time_message = "Inference time: N/A for async mode" if is_async_mode else \
                "Inference time: {:.3f} ms".format(det_time * 1000)
            render_time_message = "OpenCV rendering time: {:.3f} ms".format(render_time * 1000)
            async_mode_message = "Async mode is on. Processing request {}".format(cur_request_id) if is_async_mode else \
                "Async mode is off. Processing request {}".format(cur_request_id)

            cv2.putText(frame, inf_time_message, (15, 15), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
            cv2.putText(frame, render_time_message, (15, 30), cv2.FONT_HERSHEY_COMPLEX, 0.5, (10, 10, 200), 1)
            cv2.putText(frame, async_mode_message, (10, int(initial_h - 20)), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                        (10, 10, 200), 1)
            cv2.putText(frame, str(count) + " people", (15, 45), cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)

        # Overlay the FPS from the previous frame's render time, then measure this frame's rendering
        if render_time > 0:
            cv2.putText(frame, "{:.1f} FPS".format(1.0 / render_time), (15, 60),
                        cv2.FONT_HERSHEY_COMPLEX, 0.5, (200, 10, 10), 1)
        render_start = time.time()
        cv2.imshow("Detection Results", frame)
        render_end = time.time()
        render_time = render_end - render_start
        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(1)
        if key == 27:
            break
        if (9 == key):
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()
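
The pre-processing above converts each OpenCV frame from HWC layout to the NCHW layout the network expects. The same transform in isolation (a numpy-only sketch with illustrative shapes):

import numpy as np

frame = np.zeros((480, 640, 3), dtype=np.uint8)  # H x W x C, as OpenCV returns it
n, c, h, w = 1, 3, 300, 300                      # example network input shape

resized = frame[:h, :w]                  # stand-in for cv2.resize(frame, (w, h))
chw = resized.transpose((2, 0, 1))       # HWC -> CHW
batched = chw.reshape((n, c, h, w))      # add the batch dimension
assert batched.shape == (1, 3, 300, 300)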
Ejemplo n.º 45
0
        logging.info(
            "Removing vol_name='%s' for clusterid='%s' from Prometheus ",
            inactive_volume, clusterid)
        VOLUME_STATE.remove(normalize_prometheus_label(inactive_volume),
                            normalized_clusterid)
        ACTIVE_VOLUMES.remove(inactive_volume)


if __name__ == '__main__':
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s %(levelname)s:%(name)s:%(message)s')
    clusterid = ""
    if "AWS_CONFIG_FILE" not in os.environ:
        logging.error(
            "Expected to have AWS_CONFIG_FILE set in the environment. Exiting..."
        )
        exit(1)
    if "AWS_SHARED_CREDENTIALS_FILE" not in os.environ:
        logging.error(
            "Expected to have AWS_SHARED_CREDENTIALS_FILE set in the environment. Exiting..."
        )
        exit(1)
    if "CLUSTERID" not in os.environ:
        logging.error("Expected to have CLUSTERID set in the environment. Exiting...")
        exit(1)
    clusterid = os.environ.get("CLUSTERID")

    aws = boto3.client('ec2')

    logging.info('Starting up metrics endpoint')
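
The three environment checks above share one pattern; a compact equivalent sketch:

import logging
import os
import sys

for var in ("AWS_CONFIG_FILE", "AWS_SHARED_CREDENTIALS_FILE", "CLUSTERID"):
    if var not in os.environ:
        logging.error("Expected to have %s set in the environment. Exiting...", var)
        sys.exit(1)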
Ejemplo n.º 46
0
def main(argv=None):
    """
    Script entry point

    :param argv: Script arguments (None for sys.argv)
    :return: An exit code or None
    """
    # Prepare arguments
    parser = argparse.ArgumentParser(
        prog="pelix.shell.remote",
        parents=[make_common_parser()],
        description="Pelix Remote Shell ({} SSL support)".format(
            "with" if ssl is not None else "without"
        ),
    )

    # Remote shell options
    group = parser.add_argument_group("Remote Shell options")
    group.add_argument(
        "-a",
        "--address",
        default="localhost",
        help="The remote shell binding address",
    )
    group.add_argument(
        "-p",
        "--port",
        type=int,
        default=9000,
        help="The remote shell binding port",
    )

    if ssl is not None:
        # Remote Shell TLS options
        group = parser.add_argument_group("TLS Options")
        group.add_argument("--cert", help="Path to the server certificate file")
        group.add_argument(
            "--key",
            help="Path to the server key file "
            "(can be omitted if the key is in the certificate)",
        )
        group.add_argument(
            "--key-password",
            help="Password of the server key. "
            "Set to '-' for a password request.",
        )
        group.add_argument(
            "--ca-chain",
            help="Path to the CA chain file to authenticate clients",
        )

    # Local options
    group = parser.add_argument_group("Local options")
    group.add_argument(
        "--no-input",
        action="store_true",
        help="Run without input (for daemon mode)",
    )

    # Parse them
    args = parser.parse_args(argv)

    # Handle arguments
    init = handle_common_arguments(args)

    # Set the initial bundles
    bundles = [
        "pelix.ipopo.core",
        "pelix.shell.core",
        "pelix.shell.ipopo",
        "pelix.shell.remote",
    ]
    bundles.extend(init.bundles)

    # Start a Pelix framework
    framework = pelix.framework.create_framework(
        utilities.remove_duplicates(bundles), init.properties
    )
    framework.start()
    context = framework.get_bundle_context()

    # Instantiate configured components
    init.instantiate_components(framework.get_bundle_context())

    # Instantiate a Remote Shell, if necessary
    with use_ipopo(context) as ipopo:
        rshell_name = "remote-shell"
        try:
            ipopo.get_instance_details(rshell_name)
        except ValueError:
            # Component doesn't exist, we can instantiate it.

            if ssl is not None:
                # Copy parsed arguments
                ca_chain = args.ca_chain
                cert = args.cert
                key = args.key

                # Normalize the TLS key file password argument
                if args.key_password == "-":
                    import getpass

                    key_password = getpass.getpass(
                        "Password for {}: ".format(args.key or args.cert)
                    )
                else:
                    key_password = args.key_password
            else:
                # SSL support is missing:
                # Ensure the SSL arguments are defined but set to None
                ca_chain = None
                cert = None
                key = None
                key_password = None

            # Setup the component
            rshell = ipopo.instantiate(
                pelix.shell.FACTORY_REMOTE_SHELL,
                rshell_name,
                {
                    "pelix.shell.address": args.address,
                    "pelix.shell.port": args.port,
                    "pelix.shell.ssl.ca": ca_chain,
                    "pelix.shell.ssl.cert": cert,
                    "pelix.shell.ssl.key": key,
                    "pelix.shell.ssl.key_password": key_password,
                },
            )

            # Avoid loose reference to the password
            del key_password
        else:
            logging.error(
                "A remote shell component (%s) is already configured. "
                "Abandoning.",
                rshell_name,
            )
            return 1

    # Prepare a banner
    host, port = rshell.get_access()
    try:
        if args.no_input:
            # No input required: just print the access to the shell
            print("Remote shell bound to:", host, "- port:", port)

            try:
                while not framework.wait_for_stop(1):
                    # Wake up from the wait every second so that a
                    # KeyboardInterrupt can be raised
                    pass
            except KeyboardInterrupt:
                print("Got Ctrl+C: exiting.")
                return 127
        else:
            # Prepare interpreter variables
            variables = {
                "__name__": "__console__",
                "__doc__": None,
                "__package__": None,
                "framework": framework,
                "context": context,
                "use_ipopo": use_ipopo,
            }

            banner = (
                "{lines}\nPython interpreter with Pelix Remote Shell\n"
                "Remote shell bound to: {host}:{port}\n{lines}\n"
                "Python version: {version}\n".format(
                    lines="-" * 80, version=sys.version, host=host, port=port
                )
            )

            # Run an interpreter
            _run_interpreter(variables, banner)
    finally:
        # Stop the framework
        framework.stop()
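
Given the argument parser above (prog "pelix.shell.remote", with -a/--address, -p/--port and --no-input), a typical daemon-mode invocation would look something like:

python -m pelix.shell.remote --address 0.0.0.0 --port 9000 --no-input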
Ejemplo n.º 47
0
    def move(self, item, folder):
        """
        Move the given item from its current folder into another folder.

        :param item: The item to move.
        :type item: dict
        :param folder: The folder to move the item into.
        :type folder: dict
        """
        logging.error("IN MOVES IN MODELS")
        logging.error(item)
        

        self.propagateSizeChange(item, -item['size'])

        item['folderId'] = folder['_id']
        item['baseParentType'] = folder['baseParentType']
        item['baseParentId'] = folder['baseParentId']

        logging.error(item)
        logging.error("IN MOVES IN MODELS")

        self.propagateSizeChange(item, item['size'])
        x = self.save(item)
        logging.error("after saving------------------")
        logging.error(x)

        return x
Ejemplo n.º 48
0
        extension = audio_url[audio_url.rfind("."):]
        filename = i['artist'] + '-' + i['title'] + extension
        filename = filename.replace('/', '-')
        filename_mp3 = (i['artist'] + '-' + i['title'] + '.mp3').replace('/', '-')
        logging.debug('Downloading:')
        logging.debug('Artist: ' + i['artist'])
        logging.debug('Song: ' + i['title'])
        logging.debug('URL: ' + i['url'])
        logging.debug("File: " + filename)
        try:
            if os.path.exists(filename) or os.path.exists(filename_mp3):
                logging.warning('File exists')
            ur.urlretrieve(i['url'], filename, reporthook=reporthook)
            if extension != '.mp3':
                logging.debug('converting ' + extension + ' to mp3')
                if subprocess.call(['avconv', '-i', filename, '-f', 'mp3', '-y', '-vn', '-ab', '70000', filename_mp3]) == 0:
                    # remove old file.
                    logging.debug('removing ' + extension + ' file')
                    os.remove(filename)
            logging.info("done")
            if os.path.exists(filename) and os.path.getsize(filename) < 300:
                os.remove(filename)
            if os.path.exists(filename_mp3) and os.path.getsize(filename_mp3) < 300:
                os.remove(filename_mp3)
        except Exception as a:
            logging.error(a)
            logging.info("fail")

Ejemplo n.º 49
0
    def parse_blocks(self):
        if self.options.build < dbc.WowVersion(8, 1, 5, 0):
            entry_unpacker = struct.Struct('<4sIiIIIB3s')
        else:
            entry_unpacker = struct.Struct('<4sIIIIB3s')

        n_entries = 0
        all_entries = []
        while self.parse_offset < len(self.data):
            if self.options.build < dbc.WowVersion(8, 1, 5, 0):
                magic, game_type, unk_2, length, sig, record_id, enabled, pad = \
                        entry_unpacker.unpack_from(self.data, self.parse_offset)
            else:
                magic, unk_2, sig, record_id, length, enabled, pad = \
                        entry_unpacker.unpack_from(self.data, self.parse_offset)

            if magic != b'XFTH':
                logging.error('Invalid hotfix magic %s', magic.decode('utf-8'))
                return False

            self.parse_offset += entry_unpacker.size

            entry = {
                'record_id': record_id,
                'unk_2': unk_2,
                'enabled': enabled,
                'length': length,
                'offset': self.parse_offset,
                'sig': sig,
                'pad': codecs.encode(pad, 'hex').decode('utf-8')
            }

            if self.options.build < dbc.WowVersion(8, 1, 5, 0):
                entry['game_type'] = game_type

            if sig not in self.entries:
                self.entries[sig] = []

            if enabled:
                self.entries[sig].append(entry)
                all_entries.append(entry)

            # Skip data
            self.parse_offset += length
            n_entries += 1

        if self.options.debug:
            if self.options.build < dbc.WowVersion(8, 1, 5, 0):
                for entry in sorted(all_entries, key = lambda e: (e['unk_2'], e['sig'], e['record_id'])):
                    logging.debug('entry: { %s }',
                        ('record_id=%(record_id)-6u game_type=%(game_type)u table_hash=%(sig)#.8x ' +
                         'unk_2=%(unk_2)-5u enabled=%(enabled)u ' +
                         'length=%(length)-3u pad=%(pad)-6s offset=%(offset)-7u') % entry)
            else:
                for entry in sorted(all_entries, key = lambda e: (e['sig'], e['record_id'])):
                    logging.debug('entry: { %s }',
                            ('record_id=%(record_id)-6u table_hash=%(sig)#.8x ' +
                             'unk_2=%(unk_2)#.8x enabled=%(enabled)u ' +
                             'length=%(length)-3u pad=%(pad)-6s offset=%(offset)-7u') % entry)

        logging.debug('Parsed %d hotfix entries', n_entries)

        return True
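
For reference, a small sketch of the 8.1.5+ entry header handled above ('<4sIIIIB3s': a 4-byte magic; uint32 unk_2, sig, record_id and length; a uint8 enabled flag; and 3 pad bytes), round-tripped with struct using illustrative values:

import struct

entry = struct.Struct('<4sIIIIB3s')
raw = entry.pack(b'XFTH', 0, 0xDEADBEEF, 42, 8, 1, b'\x00\x00\x00')
magic, unk_2, sig, record_id, length, enabled, pad = entry.unpack_from(raw, 0)
assert magic == b'XFTH' and record_id == 42 and length == 8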
Ejemplo n.º 50
0
def main():
    global args, best_prec1, dtype
    best_prec1 = 0
    args = parser.parse_args()
    dtype = torch_dtypes.get(args.dtype)
    torch.manual_seed(args.seed)
    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    if args.save == '':
        args.save = time_stamp
    save_path = os.path.join(args.results_dir, args.save)
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    args.distributed = args.local_rank >= 0 or args.world_size > 1
    setup_logging(os.path.join(save_path, 'log.txt'),
                  resume=args.resume != '',
                  dummy=args.distributed and args.local_rank > 0)
    results_path = os.path.join(save_path, 'results')
    results = ResultsLog(results_path,
                         title='Training Results - %s' % args.save)

    if args.distributed:
        args.device_ids = [args.local_rank]
        dist.init_process_group(backend=args.dist_backend,
                                init_method=args.dist_init,
                                world_size=args.world_size,
                                rank=args.local_rank)

    logging.info("saving to %s", save_path)
    logging.debug("run arguments: %s", args)
    logging.info("creating model %s", args.model)

    if 'cuda' in args.device and torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
        torch.cuda.set_device(args.device_ids[0])
        cudnn.benchmark = True
    else:
        args.device_ids = None

    # create model
    model = models.__dict__[args.model]
    model_config = {'dataset': args.dataset}

    if args.model_config != '':
        model_config = dict(model_config, **literal_eval(args.model_config))

    model = model(**model_config)
    logging.info("created model with configuration: %s", model_config)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    logging.info("number of parameters: %d", num_parameters)

    # optionally resume from a checkpoint
    if args.evaluate:
        if not os.path.isfile(args.evaluate):
            parser.error('invalid checkpoint: {}'.format(args.evaluate))
        checkpoint = torch.load(args.evaluate)
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint '%s' (epoch %s)", args.evaluate,
                     checkpoint['epoch'])
    elif args.resume:
        checkpoint_file = args.resume
        if os.path.isdir(checkpoint_file):
            results.load(os.path.join(checkpoint_file, 'results.csv'))
            checkpoint_file = os.path.join(checkpoint_file,
                                           'model_best.pth.tar')
        if os.path.isfile(checkpoint_file):
            logging.info("loading checkpoint '%s'", args.resume)
            checkpoint = torch.load(checkpoint_file)
            args.start_epoch = checkpoint['epoch'] - 1
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            logging.info("loaded checkpoint '%s' (epoch %s)", checkpoint_file,
                         checkpoint['epoch'])
        else:
            logging.error("no checkpoint found at '%s'", args.resume)

    # define loss function (criterion) and optimizer
    loss_params = {}
    if args.label_smoothing > 0:
        loss_params['smooth_eps'] = args.label_smoothing
    criterion = getattr(model, 'criterion', CrossEntropyLoss)(**loss_params)
    criterion.to(args.device, dtype)
    model.to(args.device, dtype)

    # optimizer configuration
    optim_regime = getattr(model, 'regime', [{
        'epoch': 0,
        'optimizer': args.optimizer,
        'lr': args.lr,
        'momentum': args.momentum,
        'weight_decay': args.weight_decay
    }])

    optimizer = OptimRegime(model, optim_regime)

    trainer = Trainer(model,
                      criterion,
                      optimizer,
                      device_ids=args.device_ids,
                      device=args.device,
                      dtype=dtype,
                      distributed=args.distributed,
                      local_rank=args.local_rank,
                      grad_clip=args.grad_clip,
                      print_freq=args.print_freq)

    # Evaluation Data loading code
    args.eval_batch_size = args.eval_batch_size if args.eval_batch_size > 0 else args.batch_size
    val_data = DataRegime(getattr(model, 'data_eval_regime', None),
                          defaults={
                              'datasets_path': args.datasets_dir,
                              'name': args.dataset,
                              'split': 'val',
                              'augment': False,
                              'input_size': args.input_size,
                              'batch_size': args.eval_batch_size,
                              'shuffle': False,
                              'num_workers': args.workers,
                              'pin_memory': True,
                              'drop_last': False
                          })

    if args.evaluate:
        results = trainer.validate(val_data.get_loader())
        logging.info(results)
        return

    # Training Data loading code
    train_data = DataRegime(getattr(model, 'data_regime', None),
                            defaults={
                                'datasets_path': args.datasets_dir,
                                'name': args.dataset,
                                'split': 'train',
                                'augment': True,
                                'input_size': args.input_size,
                                'batch_size': args.batch_size,
                                'shuffle': True,
                                'num_workers': args.workers,
                                'pin_memory': True,
                                'drop_last': True,
                                'distributed': args.distributed,
                                'duplicates': args.duplicates,
                                'cutout': {
                                    'holes': 1,
                                    'length': 16
                                } if args.cutout else None
                            })

    logging.info('optimization regime: %s', optim_regime)
    trainer.training_steps = args.start_epoch * len(train_data)
    for epoch in range(args.start_epoch, args.epochs):
        trainer.epoch = epoch
        train_data.set_epoch(epoch)
        val_data.set_epoch(epoch)
        logging.info('\nStarting Epoch: {0}\n'.format(epoch + 1))

        # train for one epoch
        train_results = trainer.train(train_data.get_loader(),
                                      duplicates=args.duplicates,
                                      chunk_batch=args.chunk_batch)

        # evaluate on validation set
        val_results = trainer.validate(val_data.get_loader())

        if args.distributed and args.local_rank > 0:
            continue

        # remember best prec@1 and save checkpoint
        is_best = val_results['prec1'] > best_prec1
        best_prec1 = max(val_results['prec1'], best_prec1)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': args.model,
                'config': args.model_config,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1
            },
            is_best,
            path=save_path)

        logging.info('\nResults - Epoch: {0}\n'
                     'Training Loss {train[loss]:.4f} \t'
                     'Training Prec@1 {train[prec1]:.3f} \t'
                     'Training Prec@5 {train[prec5]:.3f} \t'
                     'Validation Loss {val[loss]:.4f} \t'
                     'Validation Prec@1 {val[prec1]:.3f} \t'
                     'Validation Prec@5 {val[prec5]:.3f} \t\n'.format(
                         epoch + 1, train=train_results, val=val_results))

        values = dict(epoch=epoch + 1, steps=trainer.training_steps)
        values.update({'training ' + k: v for k, v in train_results.items()})
        values.update({'validation ' + k: v for k, v in val_results.items()})
        results.add(**values)

        results.plot(x='epoch',
                     y=['training loss', 'validation loss'],
                     legend=['training', 'validation'],
                     title='Loss',
                     ylabel='loss')
        results.plot(x='epoch',
                     y=['training error1', 'validation error1'],
                     legend=['training', 'validation'],
                     title='Error@1',
                     ylabel='error %')
        results.plot(x='epoch',
                     y=['training error5', 'validation error5'],
                     legend=['training', 'validation'],
                     title='Error@5',
                     ylabel='error %')
        if 'grad' in train_results.keys():
            results.plot(x='epoch',
                         y=['training grad'],
                         legend=['gradient L2 norm'],
                         title='Gradient Norm',
                         ylabel='value')
        results.save()
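
Note on the string comparisons above (e.g. args.save == ''): they use ==/!= rather than "is", because "is" tests object identity and only matches string literals by interning accident; recent CPython versions even warn about "is" with a literal. A quick illustration:

s = ''.join(['te', 'mp'])  # builds the string 'temp' at runtime
print(s == 'temp')         # True: == compares values
print(s is 'temp')         # typically False: "is" compares object identity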
Ejemplo n.º 51
0
def charge(opportunity):

    amount = amount_to_charge(opportunity)
    logging.info(
        f"---- Charging ${amount} to {opportunity.stripe_customer} ({opportunity.name})"
    )
    if opportunity.stage_name != "Pledged":
        raise Exception(f"Opportunity {opportunity.id} is not Pledged")
    if opportunity.quarantined:
        logging.info("---- Skipping because it's quarantined")
        raise QuarantinedException(f"Opportunity {opportunity.id} is quarantined")

    opportunity.stage_name = "In Process"
    opportunity.save()

    try:
        card_charge = stripe.Charge.create(
            customer=opportunity.stripe_customer,
            amount=int(amount * 100),
            currency="usd",
            description=generate_stripe_description(opportunity),
            metadata={
                "opportunity_id": opportunity.id,
                "account_id": opportunity.account_id,
            },
        )
    except Exception as e:
        logging.info(f"Error charging card: {type(e)}")
        if isinstance(e, stripe.error.StripeError):
            message = e.user_message or ""
            logging.info(f"Message: {message}")

            reason = e.user_message

            if isinstance(e, stripe.error.CardError):
                logging.info("The card has been declined")
                logging.info(f"Decline code: {e.json_body.get('decline_code', '')}")

                if reason is None:
                    reason = "card declined for unknown reason"

            if reason is None:
                reason = "unknown failure"
        else:
            reason = "unknown failure"

        opportunity.closed_lost_reason = reason
        opportunity.stage_name = "Closed Lost"
        opportunity.save()
        logging.debug(
            f"Opportunity set to '{opportunity.stage_name}' with reason: {opportunity.closed_lost_reason}"
        )
        if opportunity.type == "Giving Circle":
            user = User.get(CIRCLE_FAILURE_RECIPIENT)
            subject = "Credit card charge failed for Circle member"
            task = Task(owner_id=user.id, what_id=opportunity.id, subject=subject)
            task.save()
            send_slack_message(
                {
                    "channel": "#circle-failures",
                    "text": f"Circle charge failed for {opportunity.name} [{opportunity.closed_lost_reason}]",
                    "icon_emoji": ":x:",
                }
            )

        raise ChargeException(opportunity, reason)

    if card_charge.status != "succeeded":
        logging.error("Charge failed. Check Stripe logs.")
        raise ChargeException(opportunity, "charge failed")

    # There's a lot going on here. Up to this point the donor selected an
    # amount (say $100) and decided if they wanted to pay our processing
    # fees. We recorded those two bits of information in the opportunity or the
    # RDO. Now we're actually charging the card, so we have new information:
    # the actual processing fees. So we move stuff around. The
    # original amount the donor selected was stored in the "amount" field of
    # the opportunity or the RDO. That amount gets moved to "donor selected
    # amount" on the opportunity. Now the amount field on the opportunity will
    # represent the gross amount (the point of this whole thing) and the
    # amount minus processing fees gets stored on the opportunity in "net
    # amount". We didn't know that amount up until the charge took place
    # because Amex.
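    # Worked example (illustrative numbers only): the donor selected $100 and the
    # card is charged a gross amount of $100.00. If Stripe reports a net of
    # $96.80 on the balance transaction, we store donor_selected_amount = 100,
    # amount (gross) = 100.00, and net_amount = 96.80.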
    balance_transaction = stripe.BalanceTransaction.retrieve(
        card_charge.balance_transaction
    )
    opportunity.donor_selected_amount = opportunity.amount
    opportunity.net_amount = balance_transaction.net / 100
    opportunity.amount = amount  # gross
    gross = card_charge.amount / 100

    opportunity.stripe_card = card_charge.source.id
    opportunity.stripe_transaction_id = card_charge.id
    opportunity.stage_name = "Closed Won"
    opportunity.save()
Ejemplo n.º 52
0
def main():

    global config, options

    # Parse command line...
    parser = ArgumentParser(usage="%(prog)s [options] [APPID [APPID ...]]")
    common.setup_global_opts(parser)
    parser.add_argument("appid", nargs='*', help=_("applicationId to check for updates"))
    parser.add_argument("--auto", action="store_true", default=False,
                        help=_("Process auto-updates"))
    parser.add_argument("--autoonly", action="store_true", default=False,
                        help=_("Only process apps with auto-updates"))
    parser.add_argument("--commit", action="store_true", default=False,
                        help=_("Commit changes"))
    parser.add_argument("--allow-dirty", action="store_true", default=False,
                        help=_("Run on git repo that has uncommitted changes"))
    parser.add_argument("--gplay", action="store_true", default=False,
                        help=_("Only print differences with the Play Store"))
    metadata.add_metadata_arguments(parser)
    options = parser.parse_args()
    metadata.warnings_action = options.W

    config = common.read_config(options)

    if not options.allow_dirty:
        status = subprocess.check_output(['git', 'status', '--porcelain'])
        if status:
            logging.error(_('Build metadata git repo has uncommitted changes!'))
            sys.exit(1)

    # Get all apps...
    allapps = metadata.read_metadata()

    apps = common.read_app_args(options.appid, allapps, False)

    gplaylog = ''
    if options.gplay:
        for appid, app in apps.items():
            gplaylog += '* ' + appid + '\n'
            version, reason = check_gplay(app)
            if version is None:
                if reason == '404':
                    logging.info("{0} is not in the Play Store".format(common.getappname(app)))
                else:
                    logging.info("{0} encountered a problem: {1}".format(common.getappname(app), reason))
            if version is not None:
                stored = app.CurrentVersion
                if not stored:
                    logging.info("{0} has no Current Version but has version {1} on the Play Store"
                                 .format(common.getappname(app), version))
                elif LooseVersion(stored) < LooseVersion(version):
                    logging.info("{0} has version {1} on the Play Store, which is bigger than {2}"
                                 .format(common.getappname(app), version, stored))
                else:
                    if stored != version:
                        logging.info("{0} has version {1} on the Play Store, which differs from {2}"
                                     .format(common.getappname(app), version, stored))
                    else:
                        logging.info("{0} has the same version {1} on the Play Store"
                                     .format(common.getappname(app), version))
        update_wiki(gplaylog, None)
        return

    locallog = ''
    for appid, app in apps.items():

        if options.autoonly and app.AutoUpdateMode in ('None', 'Static'):
            logging.debug(_("Nothing to do for {appid}.").format(appid=appid))
            continue

        msg = _("Processing {appid}").format(appid=appid)
        logging.info(msg)
        locallog += '* ' + msg + '\n'

        try:
            checkupdates_app(app)
        except Exception as e:
            msg = _("...checkupdate failed for {appid} : {error}").format(appid=appid, error=e)
            logging.error(msg)
            locallog += msg + '\n'

    update_wiki(None, locallog)

    logging.info(_("Finished"))
Ejemplo n.º 53
0
    def _RunTest(self, device, test):
        device.ClearApplicationState(self._test_instance.package)

        # Chrome crashes are not always caught by Monkey test runner.
        # Launch Chrome and verify Chrome has the same PID before and after
        # the test.
        device.StartActivity(intent.Intent(
            package=self._test_instance.package,
            activity=self._test_instance.activity,
            action='android.intent.action.MAIN'),
                             blocking=True,
                             force_stop=True)
        before_pids = device.GetPids(self._test_instance.package)

        output = ''
        if before_pids:
            if len(before_pids.get(self._test_instance.package, [])) > 1:
                raise Exception(
                    'At most one instance of process %s expected but found pids: '
                    '%s' % (self._test_instance.package, before_pids))
            output = '\n'.join(self._LaunchMonkeyTest(device))
            after_pids = device.GetPids(self._test_instance.package)

        crashed = True
        if self._test_instance.package not in before_pids:
            logging.error('Failed to start the process.')
        elif self._test_instance.package not in after_pids:
            logging.error('Process %s has died.',
                          before_pids[self._test_instance.package])
        elif (before_pids[self._test_instance.package] !=
              after_pids[self._test_instance.package]):
            logging.error('Detected process restart %s -> %s',
                          before_pids[self._test_instance.package],
                          after_pids[self._test_instance.package])
        else:
            crashed = False

        success_pattern = 'Events injected: %d' % self._test_instance.event_count
        if success_pattern in output and not crashed:
            result = base_test_result.BaseTestResult(
                test, base_test_result.ResultType.PASS, log=output)
        else:
            result = base_test_result.BaseTestResult(
                test, base_test_result.ResultType.FAIL, log=output)
            if 'chrome' in self._test_instance.package:
                logging.warning('Starting MinidumpUploadService...')
                # TODO(jbudorick): Update this after upstreaming.
                minidump_intent = intent.Intent(
                    action='%s.crash.ACTION_FIND_ALL' % _CHROME_PACKAGE,
                    package=self._test_instance.package,
                    activity='%s.crash.MinidumpUploadService' %
                    _CHROME_PACKAGE)
                try:
                    device.RunShellCommand(['am', 'startservice'] +
                                           minidump_intent.am_args,
                                           as_root=True,
                                           check_return=True)
                except device_errors.CommandFailedError:
                    logging.exception('Failed to start MinidumpUploadService')

        return result, None
Ejemplo n.º 54
0
    def post(self):
        user = users.get_current_user()
        usr_info = usr_mgt.retrieve(user)

        if user and usr_info:
            # Retrieve values
            cartridge_model = self.request.get("cartridge_model", "").strip()
            printer_maker = self.request.get("printer_maker", "").strip()
            printer_model = self.request.get("printer_model", "").strip()
            str_num_units = self.request.get("number_of_units", "").strip()
            client = self.request.get("client", "").strip()
            client_email = self.request.get("client_email", "").strip()
            num_units = 1

            # Chk
            if len(cartridge_model) < 3:
                self.redirect(
                    "/error?msg=Aborted modification: missing cartridge model")
                return

            if len(printer_maker) < 2:
                self.redirect(
                    "/error?msg=Aborted modification: missing printer maker")
                return

            if len(printer_model) < 5:
                self.redirect(
                    "/error?msg=Aborted modification: missing printer model")
                return

            try:
                num_units = int(str_num_units)
            except ValueError:
                logs.error("Invalid number of units: " + str_num_units)

            if num_units < 1 or num_units > 10:
                logs.error("Invalid number of units (must be between 1 and 10): " +
                           str_num_units)

            # Create ticket
            ticket = tickets.create(usr_info)

            ticket.born = True
            ticket.progress = Ticket.Progress.Tracked
            ticket.status = Ticket.Status.Open
            ticket.priority = Ticket.Priority.Low
            ticket.type = Ticket.Type.Supplies

            ticket.title = u"Toner: " + printer_maker\
                           + u" " + printer_model\
                           + u" " + cartridge_model
            ticket.desc = u"Client: " + client + u"\nInk cartridge requested for: "\
                          + printer_maker + " " + printer_model + '\n'\
                          + u"Cartridge #" + cartridge_model + "\n"\
                          + unicode(num_units) + u" units"
            ticket.client_email = client_email
            ticket.classroom = ""

            # Report
            tickets.send_email_for(ticket, "Ink cartridge request",
                                   "    by " + unicode(usr_info))

            # Save
            tickets.update(ticket)
            self.redirect(
                "/info?url=/manage_tickets&msg=Ink cartridge requested: " +
                ticket.title.replace('#', '').encode("ascii", "replace"))
        else:
            self.redirect("/")
Ejemplo n.º 55
0
                    follow_redirects, deadline, validate_certificate)
    except Exception as e:
        trace = traceback.format_exc()
        body = u"url: %s\npayload: %s\ntrace: %s" % (url, payload, trace)

        logging.error(u"Exception caught by safe_fetch:\n %s" % trace)
        if notify:
            notify_by_email(u"Exception caught by safe_fetch", body)

    if res:
        if debug:
            logging.info(u"safe_fetch returned %s: content = %s" %
                         (res.status_code, res.content))

        if res.status_code != 200:
            logging.error(u"safe_fetch returned %s: content=%s" %
                          (res.status_code, res.content))
            if notify:
                notify_by_email(
                    u"safe_fetch failed",
                    u"safe_fetch returned %s for\nurl: %s\npayload: %s\n content:%s"
                    % (res.status_code, url, payload, res.content))
            res = None

    return res


def base_datepicker_page(request,
                         f_data,
                         template_name,
                         wrapper_locals,
                         init_start_date=None,
Ejemplo n.º 56
0
    def rename(self, _files, current_path):
        """ Rename for Generic files """
        logging.debug("Renaming Generic file")

        def filter_files(_file, current_path):
            if is_full_path(_file):
                filepath = os.path.normpath(_file)
            else:
                filepath = os.path.normpath(os.path.join(current_path, _file))
            if os.path.exists(filepath):
                size = os.stat(filepath).st_size
                if size >= cfg.movie_rename_limit.get_int() and not RE_SAMPLE.search(_file) \
                   and get_ext(_file) not in EXCLUDED_FILE_EXTS:
                    return True
            return False

        # remove any files below the limit from this list
        files = [_file for _file in _files if filter_files(_file, current_path)]

        length = len(files)
        # Single File Handling
        if length == 1:
            file = files[0]
            if is_full_path(file):
                filepath = os.path.normpath(file)
            else:
                filepath = os.path.normpath(os.path.join(current_path, file))
            if os.path.exists(filepath):
                self.fname, ext = os.path.splitext(os.path.split(file)[1])
                newname = "%s%s" % (self.filename_set, ext)
                newname = newname.replace('%fn', self.fname)
                newpath = os.path.join(current_path, newname)
                try:
                    logging.debug("Rename: %s to %s", filepath, newpath)
                    renamer(filepath, newpath)
                except Exception:
                    logging.error(T('Failed to rename: %s to %s'), clip_path(filepath), clip_path(newpath))
                    logging.info("Traceback: ", exc_info=True)
                rename_similar(current_path, ext, self.filename_set, ())

        # Sequence File Handling
        # if there is more than one extracted file check for CD1/1/A in the title
        elif self.extra:
            matched_files = check_for_multiple(files)
            # rename files marked as in a set
            if matched_files:
                logging.debug("Renaming a series of generic files (%s)", matched_files)
                renamed = matched_files.values()
                for index, file in matched_files.iteritems():
                    filepath = os.path.join(current_path, file)
                    renamed.append(filepath)
                    self.fname, ext = os.path.splitext(os.path.split(file)[1])
                    name = '%s%s' % (self.filename_set, self.extra)
                    name = name.replace('%1', str(index)).replace('%fn', self.fname)
                    name = name + ext
                    newpath = os.path.join(current_path, name)
                    try:
                        logging.debug("Rename: %s to %s", filepath, newpath)
                        renamer(filepath, newpath)
                    except Exception:
                        logging.error(T('Failed to rename: %s to %s'), clip_path(filepath), clip_path(newpath))
                        logging.info("Traceback: ", exc_info=True)
                rename_similar(current_path, ext, self.filename_set, renamed)
            else:
                logging.debug("Movie files not in sequence %s", _files)
Ejemplo n.º 57
0
class ChromeTests(object):
    '''This class is derived from the chrome_tests.py file in ../purify/.
    '''
    def __init__(self, options, args, test):
        # The known list of tests.
        # Recognise the original abbreviations as well as full executable names.
        self._test_list = {
            "base": self.TestBase,
            "base_unittests": self.TestBase,
            "browser": self.TestBrowser,
            "browser_tests": self.TestBrowser,
            "crypto": self.TestCrypto,
            "crypto_unittests": self.TestCrypto,
            "googleurl": self.TestGURL,
            "googleurl_unittests": self.TestGURL,
            "content": self.TestContent,
            "content_unittests": self.TestContent,
            "courgette": self.TestCourgette,
            "courgette_unittests": self.TestCourgette,
            "ipc": self.TestIpc,
            "ipc_tests": self.TestIpc,
            "layout": self.TestLayout,
            "layout_tests": self.TestLayout,
            "media": self.TestMedia,
            "media_unittests": self.TestMedia,
            "net": self.TestNet,
            "net_unittests": self.TestNet,
            "printing": self.TestPrinting,
            "printing_unittests": self.TestPrinting,
            "remoting": self.TestRemoting,
            "remoting_unittests": self.TestRemoting,
            "startup": self.TestStartup,
            "startup_tests": self.TestStartup,
            "sync": self.TestSync,
            "sync_unit_tests": self.TestSync,
            "test_shell": self.TestTestShell,
            "test_shell_tests": self.TestTestShell,
            "ui": self.TestUI,
            "ui_tests": self.TestUI,
            "unit": self.TestUnit,
            "unit_tests": self.TestUnit,
            "views": self.TestViews,
            "views_unittests": self.TestViews,
            "sql": self.TestSql,
            "sql_unittests": self.TestSql,
            "ui_unit": self.TestUIUnit,
            "ui_unittests": self.TestUIUnit,
            "gfx": self.TestGfx,
            "gfx_unittests": self.TestGfx,
        }

        if test not in self._test_list:
            raise TestNotFound("Unknown test: %s" % test)

        self._options = options
        self._args = args
        self._test = test

        script_dir = path_utils.ScriptDir()

        # Compute the top of the tree (the "source dir") from the script dir (where
        # this script lives).  We assume that the script dir is in tools/asan/
        # relative to the top of the tree.
        self._source_dir = os.path.dirname(os.path.dirname(script_dir))

        # Since this path is used for string matching, make sure it's always
        # an absolute Unix-style path.
        self._source_dir = os.path.abspath(self._source_dir).replace('\\', '/')

        asan_test_script = os.path.join(script_dir, "asan_test.py")
        self._command_preamble = [asan_test_script]

    def _DefaultCommand(self, module, exe=None, asan_test_args=None):
        '''Generates the default command array that most tests will use.

    Args:
      module: The module name (corresponds to the dir in src/ where the test
              data resides).
      exe: The executable name.
      asan_test_args: additional arguments to append to the command line.
    Returns:
      A list of strings forming the command to run the test.
    '''
        if not self._options.build_dir:
            dirs = [
                os.path.join(self._source_dir, "xcodebuild", "Debug"),
                os.path.join(self._source_dir, "out", "Debug"),
            ]
            if exe:
                self._options.build_dir = FindDirContainingNewestFile(
                    dirs, exe)
            else:
                self._options.build_dir = FindNewestDir(dirs)

        cmd = list(self._command_preamble)

        if asan_test_args is not None:
            cmd.extend(asan_test_args)
        if exe:
            cmd.append(os.path.join(self._options.build_dir, exe))
            # Show elapsed time so we can find the slowpokes.
            cmd.append("--gtest_print_time")
        if self._options.gtest_repeat:
            cmd.append("--gtest_repeat=%s" % self._options.gtest_repeat)
        return cmd

    def Suppressions(self):
        '''Builds the list of available suppressions files.'''
        ret = []
        directory = path_utils.ScriptDir()
        suppression_file = os.path.join(directory, "suppressions.txt")
        if os.path.exists(suppression_file):
            ret.append(suppression_file)
        suppression_file = os.path.join(directory, "suppressions_linux.txt")
        if os.path.exists(suppression_file):
            ret.append(suppression_file)
        return ret

    def Run(self):
        '''Runs the test specified by command-line argument --test.'''
        logging.info("running test %s" % (self._test))
        return self._test_list[self._test]()

    def _ReadGtestFilterFile(self, name, cmd):
        '''Reads files which contain lists of tests to filter out with
    --gtest_filter and appends the command-line option to |cmd|.

    Args:
      name: the test executable name.
      cmd: the test running command line to be modified.
    '''
        filters = []
        directory = path_utils.ScriptDir()
        gtest_filter_files = [
            os.path.join(directory, name + ".gtest-asan.txt"),
            # TODO(glider): Linux vs. CrOS?
        ]
        logging.info("Reading gtest exclude filter files:")
        for filename in gtest_filter_files:
            # strip the leading absolute path (may be very long on the bot)
            # and the following / or \.
            readable_filename = filename.replace(self._source_dir, "")[1:]
            if not os.path.exists(filename):
                logging.info("  \"%s\" - not found" % readable_filename)
                continue
            logging.info("  \"%s\" - OK" % readable_filename)
            with open(filename, 'r') as f:
                for line in f:
                    if line.startswith("#") or line.startswith("//") or line.isspace():
                        continue
                    filters.append(line.rstrip())
        gtest_filter = self._options.gtest_filter
        if filters:
            if gtest_filter:
                gtest_filter += ":"
                if gtest_filter.find("-") < 0:
                    gtest_filter += "-"
            else:
                gtest_filter = "-"
            gtest_filter += ":".join(filters)
        if gtest_filter:
            cmd.append("--gtest_filter=%s" % gtest_filter)

    def SimpleTest(self, module, name, asan_test_args=None, cmd_args=None):
        '''Builds the command line and runs the specified test.

    Args:
      module: The module name (corresponds to the dir in src/ where the test
              data resides).
      name: The executable name.
      asan_test_args: Additional command line args for asan.
      cmd_args: Additional command line args for the test.
    '''
        cmd = self._DefaultCommand(module, name, asan_test_args)
        supp = self.Suppressions()
        self._ReadGtestFilterFile(name, cmd)
        if cmd_args:
            cmd.extend(["--"])
            cmd.extend(cmd_args)

        # Sets LD_LIBRARY_PATH to the build folder so external libraries can be
        # loaded.
        if os.getenv("LD_LIBRARY_PATH"):
            os.putenv(
                "LD_LIBRARY_PATH", "%s:%s" %
                (os.getenv("LD_LIBRARY_PATH"), self._options.build_dir))
        else:
            os.putenv("LD_LIBRARY_PATH", self._options.build_dir)
        return asan_test.RunTool(cmd, supp, module)

    def TestBase(self):
        return self.SimpleTest("base", "base_unittests")

    def TestBrowser(self):
        return self.SimpleTest("chrome", "browser_tests")

    def TestCrypto(self):
        return self.SimpleTest("crypto", "crypto_unittests")

    def TestGURL(self):
        return self.SimpleTest("chrome", "googleurl_unittests")

    def TestContent(self):
        return self.SimpleTest("content", "content_unittests")

    def TestCourgette(self):
        return self.SimpleTest("courgette", "courgette_unittests")

    def TestMedia(self):
        return self.SimpleTest("chrome", "media_unittests")

    def TestPrinting(self):
        return self.SimpleTest("chrome", "printing_unittests")

    def TestRemoting(self):
        return self.SimpleTest("chrome", "remoting_unittests")

    def TestSync(self):
        return self.SimpleTest("chrome", "sync_unit_tests")

    def TestIpc(self):
        return self.SimpleTest("ipc", "ipc_tests")

    def TestNet(self):
        return self.SimpleTest("net", "net_unittests")

    def TestStartup(self):
        # We don't need the performance results, we're just looking for pointer
        # errors, so set number of iterations down to the minimum.
        os.putenv("STARTUP_TESTS_NUMCYCLES", "1")
        logging.info("export STARTUP_TESTS_NUMCYCLES=1")
        return self.SimpleTest("chrome", "startup_tests")

    def TestTestShell(self):
        return self.SimpleTest("webkit", "test_shell_tests")

    def TestUnit(self):
        return self.SimpleTest("chrome", "unit_tests")

    def TestViews(self):
        return self.SimpleTest("views", "views_unittests")

    def TestSql(self):
        return self.SimpleTest("chrome", "sql_unittests")

    def TestUIUnit(self):
        return self.SimpleTest("chrome", "ui_unittests")

    def TestGfx(self):
        return self.SimpleTest("chrome", "gfx_unittests")

    def TestUI(self):
        return self.SimpleTest("chrome",
                               "ui_tests",
                               cmd_args=[
                                   "--ui-test-action-timeout=80000",
                                   "--ui-test-action-max-timeout=180000"
                               ])

    def TestLayoutChunk(self, chunk_num, chunk_size):
        '''Runs tests [chunk_num*chunk_size .. (chunk_num+1)*chunk_size).

    Wraps around to the beginning of the list at the end. If chunk_size is
    zero, runs all tests in the list once. If a text file is given as an
    argument, it is used as the list of tests.
    '''
        # Build the ginormous commandline in 'cmd'.
        # It's going to be roughly
        #  python asan_test.py ... python run_webkit_tests.py ...
        # but we'll use the --indirect flag to asan_test.py
        # to avoid asaning python.
        # Start by building the asan_test.py commandline.
        cmd = self._DefaultCommand("webkit")

        # Now build script_cmd, the run_webkit_tests.py commandline.
        # Store each chunk in its own directory so that we can find the data later.
        chunk_dir = os.path.join("layout", "chunk_%05d" % chunk_num)
        test_shell = os.path.join(self._options.build_dir, "test_shell")
        out_dir = os.path.join(path_utils.ScriptDir(), "latest")
        out_dir = os.path.join(out_dir, chunk_dir)
        if os.path.exists(out_dir):
            old_files = glob.glob(os.path.join(out_dir, "*.txt"))
            for f in old_files:
                os.remove(f)
        else:
            os.makedirs(out_dir)

        script = os.path.join(self._source_dir, "webkit", "tools",
                              "layout_tests", "run_webkit_tests.py")
        script_cmd = [
            "python", script, "--run-singly", "-v", "--noshow-results",
            "--time-out-ms=200000", "--nocheck-sys-deps"
        ]

        # Pass build mode to run_webkit_tests.py.  We aren't passed it directly,
        # so parse it out of build_dir.  run_webkit_tests.py can only handle
        # the two values "Release" and "Debug".
        # TODO(Hercules): unify how all our scripts pass around build mode
        # (--mode / --target / --build_dir / --debug)
        if self._options.build_dir.endswith("Debug"):
            script_cmd.append("--debug")
        if chunk_size > 0:
            script_cmd.append("--run-chunk=%d:%d" % (chunk_num, chunk_size))
        if len(self._args):
            # if the arg is a txt file, then treat it as a list of tests
            if os.path.isfile(self._args[0]) and self._args[0][-4:] == ".txt":
                script_cmd.append("--test-list=%s" % self._args[0])
            else:
                script_cmd.extend(self._args)
        self._ReadGtestFilterFile("layout", script_cmd)

        # Now run script_cmd with the wrapper in cmd
        cmd.extend(["--"])
        cmd.extend(script_cmd)
        supp = self.Suppressions()
        return asan_test.RunTool(cmd, supp, "layout")

    def TestLayout(self):
        '''Runs the layout tests.'''
        # A "chunk file" is maintained in the local directory so that each test
        # runs a slice of the layout tests of size chunk_size that increments with
        # each run.  Since tests can be added and removed from the layout tests at
        # any time, this is not going to give exact coverage, but it will allow us
        # to continuously run small slices of the layout tests under purify rather
        # than having to run all of them in one shot.
        chunk_size = self._options.num_tests
        if chunk_size == 0:
            return self.TestLayoutChunk(0, 0)
        chunk_num = 0
        chunk_file = "asan_layout_chunk.txt"

        logging.info("Reading state from " + chunk_file)
        try:
            with open(chunk_file) as f:
                contents = f.read()
            if contents:
                chunk_num = int(contents)
            # This should be enough so that we have a couple of complete runs
            # of test data stored in the archive (although note that when we
            # loop, we are almost guaranteed not to be at the end of the list).
            if chunk_num > 10000:
                chunk_num = 0
        except IOError as e:
            logging.error("error reading from file %s (%s)" % (chunk_file, e))
        ret = self.TestLayoutChunk(chunk_num, chunk_size)

        # Wait until after the test runs to completion to write out the new chunk
        # number.  This way, if the bot is killed, we'll start running again from
        # the current chunk rather than skipping it.
        logging.info("Saving state to " + chunk_file)
        try:
            chunk_num += 1
            with open(chunk_file, "w") as f:
                f.write("%d" % chunk_num)
        except IOError as e:
            logging.error("error writing to file %s (%s)" % (chunk_file, e))
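
The subtlest part of the class above is the negative filter assembly in _ReadGtestFilterFile: exclusions are joined with ":" and a single leading "-" switches the rest of the pattern to "exclude". A standalone sketch of that behavior (the function name is illustrative, not part of the script):

def build_gtest_filter(existing_filter, exclusions):
    # Mirrors _ReadGtestFilterFile: the "-" separator is emitted exactly once.
    gtest_filter = existing_filter
    if exclusions:
        if gtest_filter:
            gtest_filter += ":"
            if gtest_filter.find("-") < 0:
                gtest_filter += "-"
        else:
            gtest_filter = "-"
        gtest_filter += ":".join(exclusions)
    return gtest_filter

# build_gtest_filter("", ["Foo.Bar", "Baz.*"])  -> "-Foo.Bar:Baz.*"
# build_gtest_filter("Suite.*", ["Foo.Bar"])    -> "Suite.*:-Foo.Bar"
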
Example No. 58
0
    def GetArtwork(self, artworkname, resolution):
        """
        This method returns a valid path to an artwork with the specified resolution.

        The final path consists of the *artworkdir* given as a class parameter, the *resolution* as a subdirectory, and the *artworkname* as the filename (``{artworkdir}/{resolution}/{artworkname}``).

        If the artwork does not exist for this resolution, it will be generated.
        If the directory for that resolution does not exist, it will be created.
        In case an error occurs, an exception is raised.

        The resolution is given as string in the format ``{X}x{Y}`` (For example: ``100x100``).
        *X* and *Y* must have the same value.
        This method expects an aspect ratio of 1:1.

        Besides being scaled, the JPEG is also made progressive.

        Args:
            artworkname (str): filename of the source artwork (Usually ``$Artist - $Album.jpg``)
            resolution (str): resolution of the requested artwork

        Returns:
            Relative path to the artwork in the specified resolution.

        Raises:
            ValueError: When the source file does not exist

        Example:

            .. code-block:: python

                cache = ArtworkCache("/data/artwork")
                path  = cache.GetArtwork("example.jpg", "150x150")
                # returned path: "150x150/example.jpg"
                # absolute path: "/data/artwork/150x150/example.jpg"
        """
        logging.debug("GetArtwork(%s, %s)", artworkname, resolution)

        # Check if source exists
        if not self.artworkroot.Exists(artworkname):
            logging.error("Source file %s does not exist in the artwork root directory!", artworkname)
            raise ValueError("Source file %s does not exist in the artwork root directory!"
                             % artworkname)

        # Check if already scaled. If yes, our job is done
        scaledfile = os.path.join(resolution, artworkname)
        if self.artworkroot.Exists(scaledfile):
            return scaledfile

        # Check if the scale-directory already exist. If not, create one
        if not self.artworkroot.IsDirectory(resolution):
            logging.debug("Creating subdirectory: %s", resolution)
            self.artworkroot.CreateSubdirectory(resolution)

        # Scale image
        logging.debug("Converting image to %s", resolution)
        abssrcpath = self.artworkroot.AbsolutePath(artworkname)
        absdstpath = self.artworkroot.AbsolutePath(scaledfile)

        # "10x10" -> (10, 10)
        length = int(resolution.split("x")[0])
        size   = (length, length)

        im = Image.open(abssrcpath)
        im.thumbnail(size, Image.BICUBIC)
        im.save(absdstpath, "JPEG", optimize=True, progressive=True)

        return scaledfile
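
GetArtwork only reads the X component of the resolution string, so a non-square request like "160x90" would silently become 160x160. A hedged sketch of a stricter parse (hypothetical helper, not part of the class):

def parse_square_resolution(resolution):
    # "150x150" -> (150, 150); rejects non-square requests instead of squaring them.
    x, y = (int(v) for v in resolution.split("x"))
    if x != y:
        raise ValueError("Expected a 1:1 aspect ratio, got %s" % resolution)
    return (x, y)

Note that PIL's Image.thumbnail preserves the source aspect ratio, so a non-square source still yields a non-square file even for a square request.
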
Example No. 59
0
        help="for layout tests: # of subtests per run.  0 for all.")

    options, args = parser.parse_args()

    if options.verbose:
        logging_utils.config_root(logging.DEBUG)
    else:
        logging_utils.config_root()

    if not options.test:
        parser.error("--test not specified")

    for t in options.test:
        tests = ChromeTests(options, args, t)
        ret = tests.Run()
        if ret:
            return ret
    return 0


if __name__ == "__main__":
    if sys.platform.startswith(('linux', 'darwin')):
        ret = _main(sys.argv)
    else:
        logging.error("AddressSanitizer works only on Linux and Mac OS "
                      "at the moment.")
        ret = 1
    sys.exit(ret)
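
The snippet above begins partway through the option parsing in _main. For context, a hedged sketch of the optparse setup it implies; every flag name below is inferred from the options consumed above, not taken from the original source:

import optparse

def _build_parser():
    # All option names here are assumptions based on the attributes read above.
    parser = optparse.OptionParser()
    parser.add_option("-t", "--test", action="append", default=[],
                      help="which test to run")
    parser.add_option("-v", "--verbose", action="store_true", default=False,
                      help="verbose output")
    parser.add_option("--build_dir", help="location of the compile output")
    parser.add_option("--gtest_filter", default="", help="additional gtest filter")
    parser.add_option("--gtest_repeat", help="argument for --gtest_repeat")
    parser.add_option("-n", "--num_tests", type="int", default=0,
                      help="for layout tests: # of subtests per run.  0 for all.")
    return parser
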
Example No. 60
0
def api(req):
    httpresponse = None
    if req.method == "POST":
        tracker = req.GET.get("tracker")
        trackers = tracker.split(",") if tracker else []
        print("tracker:{}".format(tracker))
        if tracker:
            if "ffxiv-eureka" in trackers:
                instance = req.GET.get("instance")
                password = req.GET.get("password")
                print("ffxiv-eureka {}:{}".format(instance, password))
                if instance and password:
                    nm_name = req.POST.get("text")
                    if nm_name:
                        nm_id = get_nm_id("ffxiv-eureka", nm_name)
                        print("nm_name:{} id:{}".format(nm_name, nm_id))
                        if nm_id > 0:
                            print("nm_name:{} nm_id:{}".format(nm_name, nm_id))
                            ws = create_connection(
                                "wss://ffxiv-eureka.com/socket/websocket?vsn=2.0.0"
                            )
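                            # Context: Phoenix channel frames are JSON arrays of the
                            # form [join_ref, ref, topic, event, payload]. The
                            # phx_join below authenticates against the instance
                            # channel; set_kill_time then pushes a ms timestamp.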
                            msg = '["1","1","instance:{}","phx_join",{{"password":"******"}}]'.format(
                                instance, password
                            )
                            # print(msg)
                            ws.send(msg)
                            msg = '["1","2","instance:{}","set_kill_time",{{"id":{},"time":{}}}]'.format(
                                instance, nm_id, int(time.time() * 1000)
                            )
                            # print(msg)
                            ws.send(msg)
                            ws.close()
                            httpresponse = HttpResponse("OK", status=200)
                    else:
                        print("no nm_name")
            if "ffxivsc" in trackers:
                key = req.GET.get("key")
                # print("ffxiv-eureka {}:{}".format(instance,password))
                if key:
                    nm_name = req.POST.get("text")
                    if nm_name:
                        nm_level_type = get_nm_id("ffxivsc", nm_name)
                        if int(nm_level_type["type"]) > 0:
                            url = "https://nps.ffxivsc.cn/lobby/addKillTime"
                            post_data = {
                                "killTime": strftime(
                                    "%Y-%m-%d %H:%M", time.localtime()
                                ),
                                "level": "{}".format(nm_level_type["level"]),
                                "key": key,
                                "type": "{}".format(nm_level_type["type"]),
                            }
                            r = requests.post(url=url, data=post_data)
                            httpresponse = HttpResponse(r.text)
                    else:
                        print("no nm_name")
            if "qq" in trackers:
                bot_qq = req.GET.get("bot_qq")
                qq = req.GET.get("qq")
                token = req.GET.get("token")
                group_id = req.GET.get("group")
                print("bot: {} qq:{} token:{}".format(bot_qq, qq, token))
                if bot_qq and qq and token:
                    bot = None
                    qquser = None
                    group = None
                    api_rate_limit = True
                    try:
                        bot = QQBot.objects.get(user_id=bot_qq)
                    except QQBot.DoesNotExist:
                        print("bot {} does not exist".format(bot_qq))
                    try:
                        qquser = QQUser.objects.get(user_id=qq, bot_token=token)
                        if time.time() < qquser.last_api_time + qquser.api_interval:
                            api_rate_limit = False
                            print("qquser {} api rate limit exceed".format(qq))
                        qquser.last_api_time = int(time.time())
                        qquser.save(update_fields=["last_api_time"])
                    except QQUser.DoesNotExist:
                        print("qquser {}:{} auth fail".format(qq, token))
                        httpresponse = HttpResponse("QQUser {}:{} auth fail".format(qq, token), status=500)
                    if bot and qquser and api_rate_limit:
                        channel_layer = get_channel_layer()
                        msg = req.POST.get("text")
                        reqbody = req.body
                        try:
                            if reqbody:
                                reqbody = reqbody.decode()
                                reqbody = json.loads(reqbody)
                                msg = msg or reqbody.get("content")
                                msg = re.compile(
                                    "[\\x00-\\x08\\x0b-\\x0c\\x0e-\\x1f]"
                                ).sub(" ", msg)
                        except Exception:
                            pass
                        if not msg:
                            msg = github_webhook(req)
                        if not msg:
                            print("Can't get msg from request:{}:{}".format(req, reqbody))
                            httpresponse = HttpResponse("Can't get message", status=500)
                        else:
                            print("body:{}".format(req.body.decode()))
                            if group_id:
                                try:
                                    group = QQGroup.objects.get(group_id=group_id)
                                    group_push_list = [
                                        user["user_id"]
                                        for user in json.loads(group.member_list)
                                        if (
                                                user["role"] == "owner"
                                                or user["role"] == "admin"
                                        )
                                    ]
                                    print("group push list:{}".format(group_push_list))
                                except QQGroup.DoesNotExist:
                                    print("group:{} does not exist".format(group_id))
                            msg = handle_hunt_msg(msg)
                            if (
                                    group
                                    and group.api
                                    and int(qquser.user_id) in group_push_list
                            ):
                                at_msg = "[CQ:at,qq={}]".format(qquser.user_id) if req.GET.get("at", "true")=="true" else str(qquser.user_id)
                                jdata = {
                                    "action": "send_group_msg",
                                    "params": {
                                        "group_id": group.group_id,
                                        "message": "Message from {}:\n{}".format(
                                            at_msg, msg
                                        ),
                                    },
                                    "echo": "",
                                }
                            else:
                                jdata = {
                                    "action": "send_private_msg",
                                    "params": {"user_id": qquser.user_id, "message": msg},
                                    "echo": "",
                                }
                            if not bot.api_post_url:
                                async_to_sync(channel_layer.send)(
                                    bot.api_channel_name,
                                    {"type": "send.event", "text": json.dumps(jdata)},
                                )
                            else:
                                url = os.path.join(bot.api_post_url,
                                                   "{}?access_token={}".format(jdata["action"], bot.access_token))
                                headers = {'Content-Type': 'application/json'}
                                r = requests.post(url=url, headers=headers, data=json.dumps(jdata["params"]))
                                if r.status_code != 200:
                                    logging.error(r.text)
                            httpresponse = HttpResponse("OK", status=200)
            if "hunt" in trackers:
                qq = req.GET.get("qq")
                token = req.GET.get("token")
                group_id = req.GET.get("group")
                bot_qq = req.GET.get("bot_qq")
                print("qq:{} token:{}, group:{}".format(qq, token, group_id))
                if bot_qq and qq and token:
                    qquser = None
                    group = None
                    api_rate_limit = True
                    try:
                        bot = QQBot.objects.get(user_id=bot_qq)
                        qquser = QQUser.objects.get(user_id=qq, bot_token=token)
                        if time.time() < qquser.last_api_time + qquser.api_interval:
                            api_rate_limit = False
                            print("qquser {} api rate limit exceeded".format(qq))
                            httpresponse = HttpResponse("User API rate limit exceeded", status=500)
                    except QQUser.DoesNotExist:
                        print("qquser {}:{} auth fail".format(qq, token))
                    except QQBot.DoesNotExist:
                        print("bot {} does not exist".format(bot_qq))
                    else:
                        channel_layer = get_channel_layer()
                        try:
                            reqbody = json.loads(req.body.decode())
                        except Exception as e:
                            print(e)
                        else:
                            print("reqbody:{}".format(reqbody))
                            try:
                                hunt_group = HuntGroup.objects.get(group__group_id=group_id)
                                group = hunt_group.group
                                group_push_list = [
                                    user["user_id"]
                                    for user in json.loads(group.member_list)
                                ]
                                assert int(qquser.user_id) in group_push_list, "You're not in the group member list"
                                monster_name = reqbody["monster"]
                                zone_name = reqbody["zone"]
                                # Strip the FFXIV private-use glyphs that mark duplicate
                                # instances (U+E0B1..U+E0B3 -> "", "2", "3").
                                zone_name = zone_name.replace(chr(57521), "").replace(
                                    chr(57522), "2").replace(chr(57523), "3")
                                try:
                                    monster = Monster.objects.get(cn_name=monster_name)
                                except Monster.DoesNotExist:
                                    monster = Monster.objects.get(cn_name=re.sub("1|2|3", "", monster_name))
                                world_name = reqbody.get("world", "None")
                                timestamp = int(reqbody["time"])
                                world_id = reqbody.get("worldid", -1)
                                servers = Server.objects.filter(worldId=world_id)
                                server = servers[0] if servers.exists() else Server.objects.get(name=world_name)
                                # handle instances
                                if req.GET.get("strict_zone", "true")=="false" or str(monster.territory) in zone_name:  # "ZoneName2", "ZoneName"
                                    if str(monster.territory) != zone_name:  # "ZoneName2"
                                        monster_name = zone_name.replace(str(monster.territory),
                                                                         monster_name)  # "ZoneName2" -> "MonsterName2"
                                        try:
                                            monster = Monster.objects.get(cn_name=monster_name)
                                        except Monster.DoesNotExist:
                                            monster = Monster.objects.get(cn_name=re.sub("1|2|3", "", monster_name))
                                    print("Get HuntLog info:\nmonster:{}\nserver:{}".format(monster, server))
                                    if HuntLog.objects.filter(
                                            monster=monster,
                                            server=server,
                                            hunt_group=hunt_group,
                                            log_type="kill",
                                            time__gt=timestamp - 60).exists():
                                        msg = "{}——\"{}\" 已在一分钟内记录上报,此次API调用被忽略".format(server, monster,
                                                                                        time.strftime("%Y-%m-%d %H:%M:%S",
                                                                                                      time.localtime(
                                                                                                          timestamp))
                                                                                        )
                                    else:
                                        hunt_log = HuntLog(
                                            monster=monster,
                                            hunt_group=hunt_group,
                                            server=server,
                                            log_type="kill",
                                            time=timestamp
                                        )
                                        hunt_log.save()
                                        msg = "{}——\"{}\" 击杀时间: {}".format(hunt_log.server, monster,
                                                time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(timestamp))
                                            )
                                        at_msg = "[CQ:at,qq={}]".format(qquser.user_id) if req.GET.get("at", "true")=="true" else str(qquser.user_id)
                                        msg = at_msg + "通过API更新了如下HuntLog:\n{}".format(msg)
                                elif req.GET.get("verbose", "true")=="true":
                                    at_msg = "[CQ:at,qq={}]".format(qquser.user_id) if req.GET.get("at", "true")=="true" else str(qquser.user_id)
                                    msg = at_msg + "上报 {} 失败,{} 与 {} 不兼容".format(monster, monster.territory, zone_name)
                                jdata = {
                                    "action": "send_group_msg",
                                    "params": {
                                        "group_id": hunt_group.group.group_id,
                                        "message": msg,
                                    },
                                    "echo": "",
                                }
                                if not bot.api_post_url:
                                    async_to_sync(channel_layer.send)(
                                        bot.api_channel_name,
                                        {"type": "send.event", "text": json.dumps(jdata)},
                                    )
                                else:
                                    url = os.path.join(bot.api_post_url,
                                                       "{}?access_token={}".format(jdata["action"], bot.access_token))
                                    headers = {'Content-Type': 'application/json'}
                                    r = requests.post(url=url, headers=headers, data=json.dumps(jdata["params"]))
                                    if r.status_code != 200:
                                        logging.error(r.text)
                                httpresponse = HttpResponse(status=200)
                            except HuntGroup.DoesNotExist:
                                print("HuntGroup:{} does not exist".format(group_id))
                                httpresponse = HttpResponse("HuntGroup:{} does not exist".format(group_id), status=500)
                            except Monster.DoesNotExist:
                                print("Monster:{} does not exist".format(monster_name))
                                httpresponse = HttpResponse("Monster:{} does not exist".format(monster_name),
                                                            status=500)
                            except Server.DoesNotExist:
                                print("Server:{} does not exist".format(world_name))
                                httpresponse = HttpResponse("Server:{} does not exist".format(world_name), status=500)
                            except AssertionError as e:
                                print(str(e))
                                httpresponse = HttpResponse(str(e), status=500)
                else:
                    httpresponse = HttpResponse("Missing URL parameters", status=500)
            if "webapi" in trackers:
                qq = req.GET.get("qq")
                token = req.GET.get("token")
                print("qq:{}\ntoken:{}".format(qq, token))
                if qq and token:
                    qquser = None
                    try:
                        qquser = QQUser.objects.get(user_id=qq, bot_token=token)
                    except QQUser.DoesNotExist:
                        res_dict = {
                            "response": "error",
                            "msg": "Invalid API token",
                            "rcode": "101",
                        }
                        return JsonResponse(res_dict)
                    if qquser:
                        res_dict = webapi(req)
                        return JsonResponse(res_dict)
                else:
                    res_dict = {
                        "response": "error",
                        "msg": "Invalid request",
                        "rcode": "100",
                    }
                    return JsonResponse(res_dict)
                return HttpResponse("Default API Error, contact dev please", status=500)
    return httpresponse if httpresponse else HttpResponse("Default API Error, contact dev please.", status=500)
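
Both the "qq" and "hunt" trackers above repeat the same per-user rate-limit check inline. A minimal sketch of that check factored into a helper (hypothetical; the view inlines this logic):

import time

def within_rate_limit(qquser):
    # False when the user calls again before api_interval seconds have elapsed.
    # Like the inline version, the timestamp is refreshed even on a limited call.
    now = time.time()
    allowed = now >= qquser.last_api_time + qquser.api_interval
    qquser.last_api_time = int(now)
    qquser.save(update_fields=["last_api_time"])
    return allowed
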