Example #1
def register_celery_alias(alias="redis-sentinel"):
    BACKEND_ALIASES[alias] = "rpaas.celery_sentinel.RedisSentinelBackend"
    TRANSPORT_ALIASES[alias] = "rpaas.celery_sentinel.RedisSentinelTransport"
    try:
        patch_flower_broker()
    except Exception:
        logging.exception('ignored error patching flower')
Example #2
    def _spin(self, sink, value, terminate):
        while True:
            try:
                if value is _NO_VALUE:
                    sink.terminate()
                elif terminate:
                    sink.consume_and_terminate(value)
                else:
                    sink.consume(value)
            except Exception as e:  # pylint:disable=broad-except
                logging.exception(e)

            with self._lock:
                if terminate:
                    self._spinning = False
                    return
                elif self._values:
                    value = self._values.pop(0)
                    terminate = not self._values and not self._active
                elif not self._active:
                    value = _NO_VALUE
                    terminate = True
                else:
                    self._spinning = False
                    return
Example #3
    def __init__(self, backend, platform_backend, credentials_path):
        super(Browser, self).__init__(app_backend=backend, platform_backend=platform_backend)
        try:
            self._browser_backend = backend
            self._platform_backend = platform_backend
            self._tabs = tab_list.TabList(backend.tab_list_backend)
            self.credentials = browser_credentials.BrowserCredentials()
            self.credentials.credentials_path = credentials_path
            self._platform_backend.DidCreateBrowser(self, self._browser_backend)
            browser_options = self._browser_backend.browser_options
            self.platform.FlushDnsCache()
            if browser_options.clear_sytem_cache_for_browser_and_profile_on_start:
                if self.platform.CanFlushIndividualFilesFromSystemCache():
                    self.platform.FlushSystemCacheForDirectory(self._browser_backend.profile_directory)
                    self.platform.FlushSystemCacheForDirectory(self._browser_backend.browser_directory)
                else:
                    self.platform.FlushEntireSystemCache()

            self._browser_backend.SetBrowser(self)
            self._browser_backend.Start()
            self._platform_backend.DidStartBrowser(self, self._browser_backend)
            self._profiling_controller = profiling_controller.ProfilingController(
                self._browser_backend.profiling_controller_backend
            )
        except Exception:
            exc_info = sys.exc_info()
            logging.exception("Failure while starting browser backend.")
            try:
                self._platform_backend.WillCloseBrowser(self, self._browser_backend)
            except Exception:
                exception_formatter.PrintFormattedException(msg="Exception raised while closing platform backend")
            raise exc_info[1].with_traceback(exc_info[2])
Example #4
 def render(self):
     try:
         file = self.get_file()
         params = {'file': file, 'name': self.name}
         return utils.get_render_string_by_extension(file, params, False)
     except Exception as e:
         logging.exception(e)
Example #5
    def find_existing_ticket(self, matches):
        # Default title, get stripped search version
        if 'alert_subject' not in self.rule:
            title = self.create_default_title(matches, True)
        else:
            title = self.create_title(matches)

        if 'jira_ignore_in_title' in self.rule:
            title = title.replace(matches[0].get(self.rule['jira_ignore_in_title'], ''), '')

        # This is necessary for search to work. Other special characters and dashes
        # directly adjacent to words appear to be ok
        title = title.replace(' - ', ' ')
        title = title.replace('\\', '\\\\')

        date = (datetime.datetime.now() - datetime.timedelta(days=self.max_age)).strftime('%Y-%m-%d')
        jql = 'project=%s AND summary~"%s" and created >= "%s"' % (self.project, title, date)
        if self.bump_in_statuses:
            jql = '%s and status in (%s)' % (jql, ','.join(self.bump_in_statuses))
        if self.bump_not_in_statuses:
            jql = '%s and status not in (%s)' % (jql, ','.join(self.bump_not_in_statuses))
        try:
            issues = self.client.search_issues(jql)
        except JIRAError as e:
            logging.exception("Error while searching for JIRA ticket using jql '%s': %s" % (jql, e))
            return None

        if issues:
            return issues[0]
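For illustration (the project, title, and date below are hypothetical): the ` - ` and backslash replacements above exist because JIRA summary search chokes on those sequences, and the resulting JQL for a 30-day max_age looks like this:

# Hypothetical inputs: project='OPS', title='Alert - DB\prod', max_age=30.
# After the replacements above, title == 'Alert DB\\prod' and the generated JQL is:
#   project=OPS AND summary~"Alert DB\\prod" and created >= "2024-01-01"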
Example #6
def get_subject_sections_from_opendata(subject, term):
    """Get info on all sections offered for all courses of a given subject and
    term.

    Args:
        subject: The department ID (eg. CS)
        term: The 4-digit Quest term code (defaults to current term)
    """
    url = ('{api_url}/terms/{term}/{subject}/schedule.json'
            '?key={api_key}'.format(
                api_url=API_UWATERLOO_V2_URL,
                api_key=s.OPEN_DATA_API_KEY,
                subject=subject,
                term=term,
    ))

    data = get_data_from_url(url)
    try:
        sections = data['data']
    except (KeyError, TypeError):
        logging.exception('crawler.py: Schedule API call failed with'
                " url %s and data:\n%s" % (url, data))
        raise

    return sections
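A minimal usage sketch (the subject and term code are hypothetical, and the result field names are assumed from the OpenData v2 schedule schema):

sections = get_subject_sections_from_opendata('CS', '1139')
for section in sections:
    print(section.get('catalog_number'), section.get('section'))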
Example #7
	def locate_bible_refs(self, sepost_object):
		"""Reads the body to locate found bible references, tags the body, and stores an array of found references"""

		refparser_url = "http://api.biblia.com/v1/bible/scan/?"
		biblia_apikey = self.ini.get_ini_value('keys', 'biblia_apikey')

		sepost_object.biblia_apikey = biblia_apikey
		sepost_object.found_refs = []
		nchunk_start = 0
		nchunk_size = 1000

		se_body = sepost_object.body.encode('utf-8', errors='ignore')

		while nchunk_start < len(se_body):
			body_chunk = se_body[nchunk_start:nchunk_start + nchunk_size]

			refparser_params = {'text': body_chunk, 'key': biblia_apikey }
			headers = {'content-type': 'text/plain; charset=utf-8', 'Accept-Encoding': 'gzip,deflate,sdch'}

			refparse = requests.get(refparser_url, params=refparser_params, headers=headers)

			if (refparse.status_code == 200):
				foundrefs = json.loads(refparse.text)
				for foundref in foundrefs['results']:
					foundref['textIndex'] += nchunk_start
					sepost_object.found_refs.append( foundref )
			else:
				msg = "Status Code {0}: Failed to retrieve valid parsing info at {1}\n     returned text is: =>{2}<=".format(refparse.status_code, refparse.url, refparse.text)
				logging.error(msg)

			nchunk_start += (nchunk_size-50)
Example #8
def cookie2user(cookie_str):
    '''
    Parse cookie and load user if cookie is valid.
    '''
    if not cookie_str:
        return None
    try:
        L = cookie_str.split('-')
        if len(L) != 3:
            return None
        uid, expires, sha1 = L
        if int(expires) < time.time():
            return None
        user = yield from User.find(uid)
        local_auths = yield from LocalAuth.findAll('user_id=?', [uid])
        if user is None or not local_auths:
            return None
        local_auth = local_auths[0]
        s = '%s-%s-%s-%s' % (uid, local_auth.user_pwd, expires, _COOKIE_KEY)
        if sha1 != hashlib.sha1(s.encode('utf-8')).hexdigest():
            logging.info('invalid sha1')
            return None
        return user
    except Exception as e:
        logging.exception(e)
        return None
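For context, a cookie that passes the check above must be produced with the same uid-expires-sha1 layout. A minimal counterpart sketch (the name user2cookie and the max_age parameter are illustrative; _COOKIE_KEY is the same module-level secret the parser uses):

import hashlib
import time

def user2cookie(uid, user_pwd, max_age):
    # expires is an absolute Unix timestamp, matching the int(expires) check above
    expires = str(int(time.time() + max_age))
    s = '%s-%s-%s-%s' % (uid, user_pwd, expires, _COOKIE_KEY)
    return '-'.join([uid, expires, hashlib.sha1(s.encode('utf-8')).hexdigest()])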
Example #9
def dump_pickle(input_obj, file_name, how='normal'):
    """
    存贮python序列化的本地文件
    :param input_obj: 需要进行序列化的对象
    :param file_name: 文件名,str对象, 相对路径或者绝对路径
    :param how: 序列化协议选择,默认normal不特殊处理,
                zero使用python2, python3协议兼容模式,使用protocol=0,
                high使用支持的最高协议
    """
    ensure_dir(file_name)

    print('please wait! dump_pickle....:', file_name)

    try:
        with open(file_name, "wb") as pick_file:
            if K_SET_PICKLE_HIGHEST_PROTOCOL or how == 'high':
                # dump with the highest supported protocol
                pickle.dump(input_obj, pick_file, pickle.HIGHEST_PROTOCOL)
            elif K_SET_PICKLE_ZERO_PROTOCOL or how == 'zero':
                # Python 2 / Python 3 compatibility mode: protocol=0
                pickle.dump(input_obj, pick_file, 0)
            else:
                pickler = Pickler(pick_file)
                pickler.dump(input_obj)
    except Exception as e:
        logging.exception(e)
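Typical calls, one per protocol mode (the paths are illustrative; the K_SET_PICKLE_* flags are the module-level defaults referenced above):

data = {'open': [15.2, 15.4], 'close': [15.3, 15.1]}
dump_pickle(data, './cache/kline.pkl')                  # default Pickler
dump_pickle(data, './cache/kline_p0.pkl', how='zero')   # protocol 0, Python 2/3 compatible
dump_pickle(data, './cache/kline_hi.pkl', how='high')   # highest supported protocol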
Example #10
 def __init__(self):
     try:
         with open('logging.json') as jl:
             dictConfig(json.load(jl))
         self.client = redis.Redis('db')
     except Exception as e:
         logging.exception('Problem instantiating batch/job repository (%s)', e)
Example #11
    def test(self):
        try:
            if self.nmctype == "namecoind":
                res = self.callRPC("getinfo", [])
                vers = res["version"]

                v3 = vers % 100
                vers = vers // 100
                v2 = vers % 100
                vers = vers // 100
                v1 = vers
                if v3 == 0:
                    versStr = "0.%d.%d" % (v1, v2)
                else:
                    versStr = "0.%d.%d.%d" % (v1, v2, v3)
                return ('success', tr._translate("MainWindow", 'Success!  Namecoind version %1 running.').arg(str(versStr)))

            elif self.nmctype == "nmcontrol":
                res = self.callRPC ("data", ["status"])
                prefix = "Plugin data running"
                if ("reply" in res) and res["reply"][:len(prefix)] == prefix:
                    return ('success', tr._translate("MainWindow", 'Success!  NMControl is up and running.'))

                logger.error("Unexpected nmcontrol reply: %s", res)
                return ('failed',  tr._translate("MainWindow",'Couldn\'t understand NMControl.'))

            else:
                assert False

        except Exception:
            logger.exception("Namecoin connection test failure")
            return ('failed', "The connection to namecoin failed.")
Example #12
    def handle_join_success(self, data):
        # portal should be acquisition wrapped, this is needed for the schema
        # adapter below
        portal = getToolByName(self.context, 'portal_url').getPortalObject()
        registration = getToolByName(self.context, 'portal_registration')
        portal_props = getToolByName(self.context, 'portal_properties')
        mt = getToolByName(self.context, 'portal_membership')
        props = portal_props.site_properties
        use_email_as_login = props.getProperty('use_email_as_login')

        if use_email_as_login:
            # The username field is not shown as the email is going to
            # be the username, but the field *is* needed further down
            # the line.
            data['username'] = data['email']
            # Set username in the form; at least needed for logging in
            # immediately when password reset is bypassed.
            self.request.form['form.username'] = data['email']

        user_id = data['username']
        password = data.get('password') or registration.generatePassword()
        if isinstance(password, str):
            password = password.encode('utf8')

        try:
            registration.addMember(user_id, password, REQUEST=self.request)
        except (AttributeError, ValueError) as err:
            logging.exception(err)
            IStatusMessage(self.request).addStatusMessage(err, type="error")
            return
Example #13
    def render_to_string(self, request, inline=True):
        """Render the Review UI to an HTML string.

        This renders the Review UI to a string for use in embedding into
        either an existing page or a new page.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            inline (bool, optional):
                Whether to render this such that it can be embedded into an
                existing page, instead of as a standalone page.

        Returns:
            django.utils.safestring.SafeText:
            The HTML for the Review UI.
        """
        self.request = request

        try:
            context = self.build_render_context(request, inline=inline)

            return render_to_string(
                self.template_name,
                RequestContext(request, context))
        except Exception as e:
            logging.exception('Error when rendering %r: %s', self, e)
Example #14
    def _parse_history_response(self, user, login, page_num, callback, title_ids, data):
        total = 0
        lastlink = None
        thislink = None
        try:
            for item in data['rental_history']['rental_history_item']:
                if item is None or 'link' not in item:
                    continue
                itemlinks = item['link']
                if 'href' in itemlinks:
                    itemlinks = [itemlinks]  # wrap a single item in a list so the for loop below is consistent
                lastlink = thislink
                thislink = item

                total = total+1
                for link in itemlinks:
                    if link['rel'] == 'http://schemas.netflix.com/catalog/title.season':
                        if link['href'] in title_ids:
                            continue
                        title_ids.append(link['href'])
                    if link['rel'] == 'http://schemas.netflix.com/catalog/title':
                        if link['href'].find('titles/programs') > -1 or link['href'].find('titles/discs') > -1:
                            continue
                        if link['href'] in title_ids:
                            break
                        title_ids.append(link['href'])
        except Exception as e:
            logging.error('lastlink %s' % lastlink)
            logging.error('thislink %s' % thislink)
            logging.error('rental_history %s' % data['rental_history'])
            logging.exception(e)
            return None
Example #15
    def netflix_request(self, path, callback, access_token=None,
                           post_args=None, **args):
        args['output'] = 'json'
        overridepost = False
        if args.get('override') == 'POST':
            args.pop('override')
            overridepost = True
            post_args = args
            args = {}
        # Add the OAuth resource request signature if we have credentials
        url = 'http://api.netflix.com' + path
        if access_token:
            #args['output'] = 'json'
            all_args = {}
            all_args.update(args)
            all_args.update(post_args or {})
            method = 'POST' if post_args is not None else 'GET'
            oauth = self._oauth_request_parameters(
                url, access_token, all_args, method=method)
            args.update(oauth)

        if args:
            url += '?' + urllib.urlencode(args)
        try:
            if post_args is not None:
                response = urlfetch.fetch(url, method='POST',
                    payload=urllib.urlencode(post_args), deadline=10)
            else:
                response = urlfetch.fetch(url, deadline=10)
        except urlfetch.DownloadError as e:
            logging.exception(e)
            response = None
Example #16
def do_rename(old_path, new_path):
    try:
        os.rename(old_path, new_path)
    except Exception as err:
        logging.exception("Rename failed on %s to %s err: %s",
                          old_path, new_path, str(err))
        raise
Example #17
def do_makedirs(path):
    try:
        os.makedirs(path)
    except OSError as err:
        logging.exception("Makedirs failed on %s err: %s", path, str(err))
        if err.errno != errno.EEXIST:
            raise
Example #18
def add_score(request):
  if request.method != 'POST':
    players = Player.gql("WHERE owner = :owner AND active = True ORDER BY name", owner=request.user)
    return render_to_response(request, 'pingpong/addscore.html',
      { 'players': players, })
  else:
    mode = 'singles' # Used when we re-direct back to the main view
    try:
      # Find players. Save teams. Save game using a ranking system.
      t1p1 = get_object(Player, request.POST['t1p1'])
      t1p2 = get_object(Player, request.POST['t1p2'])
      t2p1 = get_object(Player, request.POST['t2p1'])
      t2p2 = get_object(Player, request.POST['t2p2'])
      t1s = float(request.POST['t1s'])
      t2s = float(request.POST['t2s'])
      t1 = db_create(Team, player1=t1p1, player2=t1p2, points=t1s)
      t2 = db_create(Team, player1=t2p1, player2=t2p2, points=t2s)
      game = db_create(Game, team1=t1, team2=t2)
      save_player_games(game, t1p1, t1p2, t2p1, t2p2, t1s, t2s)
      doubles = (t1p1 is not None and t1p2 is not None and t2p1 is not None and t2p2 is not None)
      if doubles:
        mode = 'doubles'
      ranking_system = DefaultRankingSystem()
      ranking_system.save_game(t1p1=t1p1, t1p2=t1p2, t2p1=t2p1, t2p2=t2p2, 
        t1s=t1s, t2s=t2s, t1=t1, t2=t2, game=game, doubles=doubles)
      response_dict = { 'status': True, 'message': 'Scores successfully saved.', 
        'mode': mode, 'game': str(game.key()) }
    except Exception:
      logging.exception('There was a problem adding scores')
      response_dict = { 'status': False, 'message' : 'Hmmm... There was a problem saving your scores - please have another go.', 'mode': mode }
    return HttpResponse(simplejson.dumps(response_dict), mimetype='application/json')
Example #19
def do_rmdir(path):
    try:
        os.rmdir(path)
    except OSError as err:
        logging.exception("Rmdir failed on %s err: %s", path, str(err))
        if err.errno != errno.ENOENT:
            raise
Example #20
 def _loop(self):
     while not self.stop_event.is_set():
         try:
             self.func()
         except Exception:
             logging.exception("Scheduler error")
         self.stop_event.wait(self.interval)
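A sketch of the surrounding class this loop assumes (only _loop appears in the example; the attribute names func, interval, and stop_event are taken from it, the rest is illustrative):

import threading

class Scheduler:
    def __init__(self, func, interval):
        self.func = func                     # callable invoked every cycle
        self.interval = interval             # seconds between invocations
        self.stop_event = threading.Event()  # set() to stop the loop
        self._thread = threading.Thread(target=self._loop, daemon=True)

    def start(self):
        self._thread.start()

    def stop(self):
        self.stop_event.set()
        self._thread.join()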
Example #21
def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.items(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except: # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE
Example #22
  def post(self):
    """POST handler."""
    try:
      namespace = self.request.get('namespace', None)
      save_manifest = self.request.get('save_manifest', 'true') == 'true'
      vcs = versions.VersionControlService()
      staging_changeset = versions.Changeset(
          int(self.request.get('changeset')), namespace=namespace)
      force = bool(self.request.get('force', False))
      manifest = self.request.POST.get('manifest', None)
      if (not force and not manifest) or (force and manifest):
        self.error(400)
        logging.error('Exactly one of "manifest" or "force" params is required')
        return

      # If a client has full knowledge of the files uploaded to a changeset,
      # the "manifest" param may be given to ensure a strongly consistent
      # commit. If given, associate the files to the changeset and finalize it.
      if manifest:
        manifest = json.loads(manifest)
        for path in manifest:
          titan_file = files.File(path, changeset=staging_changeset,
                                  namespace=namespace, _internal=True)
          staging_changeset.associate_file(titan_file)
        staging_changeset.finalize_associated_files()

      final_changeset = vcs.commit(
          staging_changeset, force=force, save_manifest=save_manifest)
      self.write_json_response(final_changeset)
      self.response.set_status(201)
    except (TypeError, ValueError):
      self.error(400)
      logging.exception('Bad request:')
Example #23
    def _process_result(self):
        document = XML(self._xml_data)

        if document.find(_FIND_DESCRIPTION) is None:
            logging.debug('Bundle %s not available in the server for the '
                'version %s', self._bundle.get_bundle_id(), config.version)
            version = None
            link = None
            size = None
        else:
            try:
                version = NormalizedVersion(document.find(_FIND_VERSION).text)
            except InvalidVersionError:
                logging.exception('Exception occurred while parsing version')
                version = '0'

            link = document.find(_FIND_LINK).text

            try:
                size = int(document.find(_FIND_SIZE).text) * 1024
            except ValueError:
                logging.exception('Exception occurred while parsing size')
                size = 0

        global _fetcher
        _fetcher = None
        self._completion_cb(self._bundle, version, link, size, None)
Example #24
  def post(self):
    if 'X-AppEngine-TaskName' not in self.request.headers:
      logging.critical('Detected an attempted XSRF attack. The header '
                       '"X-AppEngine-TaskName" was not set.')
      self.response.set_status(403)
      return

    # TODO: In AppScale we do not check for these XSRF attacks. We need some
    # additional auth like we do for taskqueue, with a secret hash.
    #
    #in_prod = (
    #    not self.request.environ.get("SERVER_SOFTWARE").startswith("Devel"))
    #if in_prod and self.request.environ.get("REMOTE_ADDR") != "0.1.0.2":
    #  logging.critical('Detected an attempted XSRF attack. This request did '
    #                   'not originate from Task Queue.')
    #  self.response.set_status(403)
    #  return

    headers = ["%s:%s" % (k, v) for k, v in self.request.headers.items()
               if k.lower().startswith("x-appengine-")]
    try:
      run(self.request.body)
    except PermanentTaskFailure:
      logging.exception("Permanent failure attempting to execute task")
Example #25
def get_stats(session, collect_host_stats, consolidation_function, interval, start_time):
    try:

        url = "http://localhost/rrd_updates?"
        url += "session_id=" + session._session
        url += "&host=" + collect_host_stats
        url += "&cf=" + consolidation_function
        url += "&interval=" + str(interval)
        url += "&start=" + str(int(time.time()) - 100)

        logging.debug("Calling URL: %s", url)
        sock = urllib.URLopener().open(url)
        xml = sock.read()
        sock.close()
        logging.debug("Size of returned XML: %s", len(xml))
        return xml
    except Exception:
        logging.exception("get_stats() failed")
        raise
Example #26
    def _get_options(self):
        """Get the available option information from the extensions
        """
        options = {}

        path = os.path.join(config.ext_path, 'cpsection')
        folder = os.listdir(path)

        for item in folder:
            if os.path.isdir(os.path.join(path, item)) and \
                    os.path.exists(os.path.join(path, item, '__init__.py')):
                try:
                    mod = __import__('.'.join(('cpsection', item)),
                                     globals(), locals(), [item])
                    view_class = getattr(mod, 'CLASS', None)
                    if view_class is not None:
                        options[item] = {}
                        options[item]['alerts'] = []
                        options[item]['view'] = view_class
                        options[item]['icon'] = getattr(mod, 'ICON', item)
                        options[item]['title'] = getattr(mod, 'TITLE', item)
                        options[item]['color'] = getattr(mod, 'COLOR', None)
                        keywords = getattr(mod, 'KEYWORDS', [])
                        keywords.append(options[item]['title'].lower())
                        if item not in keywords:
                            keywords.append(item)
                        options[item]['keywords'] = keywords
                    else:
                        _logger.error('no CLASS attribute in %r', item)
                except Exception:
                    logging.exception('Exception while loading extension:')

        return options
Example #27
    def get(self):
        """
        List API versions.

        :return: Returns the api versions.
        :rtype: :class:`flask.response`
        """
        # at least let it look like an open stack function
        try:
            resp = dict()
            resp["versions"] = [
                {
                    "id": "v1",
                    "links": [{"href": "http://%s:%d/v1/" % (self.api.ip, self.api.port), "rel": "self"}],
                    "status": "CURRENT",
                    "version": "1",
                    "min_version": "1",
                    "updated": "2013-07-23T11:33:21Z",
                }
            ]

            return Response(json.dumps(resp), status=200, mimetype="application/json")

        except Exception as ex:
            logging.exception(u"%s: Could not show list of versions." % __name__)
            return str(ex), 500
Example #28
    def process(self, iprot, oprot):
        try:
            if iprot.upgraded() is not None:
                return self._underlying.process(iprot, oprot)
        except AttributeError as e:
            logging.exception("underlying protocol object is not a TFinagleServerProtocol", e)
            return self._underlying.process(iprot, oprot)

        (name, ttype, seqid) = iprot.readMessageBegin()
        if ttype != TMessageType.CALL and ttype != TMessageType.ONEWAY:
            raise TException("TFinagle protocol only supports CALL & ONEWAY")

        # Check if this is an upgrade request.
        if name == UPGRADE_METHOD:
            connection_options = ConnectionOptions()
            connection_options.read(iprot)
            iprot.readMessageEnd()

            oprot.writeMessageBegin(UPGRADE_METHOD, TMessageType.REPLY, seqid)
            upgrade_reply = UpgradeReply()
            upgrade_reply.write(oprot)
            oprot.writeMessageEnd()
            oprot.trans.flush()

            iprot.set_upgraded(True)
            oprot.set_upgraded(True)
            return True

        # Not upgraded. Replay the message begin to the underlying processor.
        iprot.set_upgraded(False)
        oprot.set_upgraded(False)
        msg = (name, ttype, seqid)
        return self._underlying.process(StoredMessageProtocol(iprot, msg), oprot)
Example #29
    def from_args(args):
        cred = SMBCredential()
        cred.from_target_string(args.target)
        if args.hashes is not None:
            cred.lm_hash, cred.nt_hash = args.hashes.split(':')
        
        if args.aesKey is not None:
            try:
                bytes.fromhex(args.aesKey)
            except Exception:
                logging.exception('Kerberos AES key format incorrect!')
                raise

            t = len(args.aesKey)
            if t == 64:
                cred.kerberos_key_aes_256 = args.aesKey.lower()
            elif t == 32:
                cred.kerberos_key_aes_128 = args.aesKey.lower()
            else:
                raise Exception('Kerberos AES key length incorrect!')

        if args.k:
            if not cred.has_kerberos_secret():
                raise Exception('Trying to perform Kerberos authentication with no usable kerberos secrets!')
            cred.force_kerberos = True
        
        if not args.no_pass and not cred.has_secret():
            cred.password = getpass.getpass()

        return cred
Example #30
    def __call__(self, request):
        self.app_id = get_origin(request.environ)
        self.facets = [self.app_id]

        page = request.path_info_pop()

        if not page:
            return json.dumps(self.facets)

        try:
            username = request.params.get('username', 'user')
            data = request.params.get('data', None)

            if page == 'enroll':
                return self.enroll(username)
            elif page == 'bind':
                return self.bind(username, data)
            elif page == 'sign':
                return self.sign(username)
            elif page == 'verify':
                return self.verify(username, data)
            else:
                raise exc.HTTPNotFound()
        except Exception:
            log.exception("Exception in call to '%s'", page)
            return exc.HTTPBadRequest(comment=traceback.format_exc())
Example #31
    if "REPOSITORY_URL" in os.environ:
        repo = os.environ["REPOSITORY_URL"]
    else:
        repo = subprocess.check_output(["git", "config", "remote.origin.url"], text=True)
    repo = repo.strip().replace("git@github.com:", "https://github.com/")
    if "BRANCH" in os.environ:
        branch = os.environ["BRANCH"]
    else:
        branch = subprocess.check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"], text=True)
        branch = branch.strip()
    base = subprocess.check_output(["git", "rev-parse", "--show-prefix"], text=True)
    base = base.strip().strip("/")
    urltemplate = ("{repo}/tree/{branch}/{base}/{filename}"
        .format(repo=repo, branch=branch, base=base, filename="{}"))
except Exception:
    logging.exception("Could not generate repository URL; generating local URLs instead.")
    urltemplate = "file://{pwd}/{filename}".format(pwd=os.environ["PWD"], filename="{}")
try:
    commit = subprocess.check_output(["git", "rev-parse", "--short", "HEAD"], text=True)
except Exception:
    logging.exception("Could not figure out HEAD commit.")
    commit = "??????"
try:
    dirtyfiles = subprocess.check_output(["git", "status", "--porcelain"], text=True)
except Exception:
    logging.exception("Could not figure out repository cleanliness.")
    dirtyfiles = "?? git status --porcelain failed"

def makelink(filename):
    if os.path.isfile(filename):
        url = urltemplate.format(filename)
Example #32
    def server_run(self):
        """The mean server thread."""
        timeout = self.timeout

        # Create a TCP/IP socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        # Connect the socket to the port where the server is listening
        server_address = (self.server, self.port)
        logging.info(
            'ThorCamServer: starting up on {} port {}'.format(*server_address))

        try:
            sock.bind(server_address)
            sock.listen(1)

            r, _, _ = select.select([sock], [], [])
            if not r:
                raise TypeError

            connection, client_address = sock.accept()
            msg_len, msg_buff = (), b''

            try:
                while True:
                    r, _, _ = select.select([connection], [], [], timeout)
                    if r:
                        msg_len, msg_buff, msg, value = self.read_msg(
                            connection, msg_len, msg_buff)
                        if msg and self.process_client_message(
                                connection, msg, value) == 'eof':
                            return

                    try:
                        while self.tsi_cam is not None:
                            msg, value = \
                                self.tsi_cam.from_cam_queue.get_nowait()
                            self.process_cam_message(connection, msg, value)
                    except Empty:
                        pass
            except connection_errors:
                pass
            finally:
                logging.info('ThorCamServer: closing client connection')
                connection.close()
        except Exception as e:
            logging.exception(e)
        finally:
            if self.tsi_cam is not None:
                self.tsi_cam.send_message('close_cam')
                if self.tsi_cam.camera_thread is not None:
                    self.tsi_cam.camera_thread.join()

            try:
                if self.tsi_sdk is not None:
                    self.tsi_sdk.Dispose()

                if self.tsi_color_sdk is not None:
                    self.tsi_color_sdk.Dispose()
            finally:
                logging.info('ThorCamServer: closing socket')
                sock.close()
Example #33
def install_nightly_packs(client: demisto_client,
                          host: str,
                          packs_to_install: List,
                          request_timeout: int = 999999):
    """
    Install content packs on nightly build.
    We will catch the exception if pack fails to install and send the request to install packs again without the
    corrupted pack.
    Args:
        client(demisto_client): The configured client to use.
        host (str): The server URL.
        packs_to_install (list): A list of the packs to install.
        request_timeout (int): Timeout settings for the installation request.

    Returns:
        None: No data returned.
    """
    logging.info(f'Installing packs on server {host}')
    # make the pack installation request
    all_packs_install_successfully = False
    request_data = {'packs': packs_to_install, 'ignoreWarnings': True}
    while not all_packs_install_successfully:
        try:
            packs_to_install_str = ', '.join(
                [pack['id'] for pack in packs_to_install])
            logging.debug(
                f'Installing the following packs in server {host}:\n{packs_to_install_str}'
            )
            response_data, status_code, _ = demisto_client.generic_request_func(
                client,
                path='/contentpacks/marketplace/install',
                method='POST',
                body=request_data,
                accept='application/json',
                _request_timeout=request_timeout)

            if 200 <= status_code < 300:
                packs_data = [{
                    'ID': pack.get('id'),
                    'CurrentVersion': pack.get('currentVersion')
                } for pack in ast.literal_eval(response_data)]
                logging.success(
                    f'Packs were successfully installed on server {host}')
                logging.debug(
                    f'The following packs were successfully installed on server {host}:\n{packs_data}'
                )
            else:
                result_object = ast.literal_eval(response_data)
                message = result_object.get('message', '')
                raise Exception(
                    f'Failed to install packs - with status code {status_code}\n{message}\n'
                )
            break

        except Exception as e:
            all_packs_install_successfully = False
            malformed_pack_id = find_malformed_pack_id(str(e))
            if not malformed_pack_id:
                logging.exception('The request to install packs has failed')
                raise
            pack_ids_to_install = {pack['id'] for pack in packs_to_install}
            malformed_pack_id = malformed_pack_id[0]
            if malformed_pack_id not in pack_ids_to_install:
                logging.exception(
                    f'The pack {malformed_pack_id} has failed to install even though it was not in the installation list'
                )
                raise
            logging.warning(
                f'The request to install packs has failed, retrying without {malformed_pack_id}'
            )
            # Remove the malformed pack from the pack to install list.
            packs_to_install = [
                pack for pack in packs_to_install
                if pack['id'] != malformed_pack_id
            ]
            request_data = {'packs': packs_to_install, 'ignoreWarnings': True}
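The helper find_malformed_pack_id is not shown here; a minimal sketch of the contract the retry loop relies on (the error-message pattern is an assumption, not the actual server format):

import re
from typing import List

def find_malformed_pack_id(error_message: str) -> List[str]:
    # Assumes the server names the offending pack as: ... pack with ID "SomePack" ...
    return re.findall(r'pack with ID "([^"]+)"', error_message)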
Example #34
import sys
import logging
try:
    import inflect
except ImportError:
    logging.exception(
        "Pluralize module requires 'inflect' package to be installed."
        "Install it and try again")
    sys.exit(1)

from rita.macros import resolve_value
from rita.utils import flatten


def pluralizing(initial_list):
    """
    For a list of nouns, return a list containing the initial nouns plus their plurals.
    """
    p = inflect.engine()
    plurals = [p.plural(word) for word in initial_list]
    return initial_list + plurals


def PLURALIZE(*args, config, op=None):
    """
    For a noun or a list of nouns, it will match any singular or plural word
    Usage for a single word, e.g.:
    PLURALIZE("car")
    Usage for lists, e.g.:
    vehicles = {"car", "bicycle", "ship"}
    PLURALIZE(vehicles)
Example #35
 def wrapped(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except (ProgrammingError, OperationalError):
         logging.exception('Failed processing signal %s', func.__name__)
         return
Example #36
    def __init__(self, configfile, screen=None):
        self.default_config_path = os.path.join(klipperscreendir,
                                                "ks_includes", "defaults.conf")
        self.config = configparser.ConfigParser()
        self.config_path = self.get_config_file_location(configfile)
        logging.debug("Config path location: %s" % self.config_path)
        self.defined_config = None

        try:
            self.config.read(self.default_config_path)
            if self.config_path != self.default_config_path:
                user_def, saved_def = self.separate_saved_config(
                    self.config_path)
                self.defined_config = configparser.ConfigParser()
                self.defined_config.read_string(user_def)

                includes = [
                    i[8:] for i in self.defined_config.sections()
                    if i.startswith("include ")
                ]
                for include in includes:
                    self._include_config(
                        "/".join(self.config_path.split("/")[:-1]), include)

                for i in [
                        'menu __main', 'menu __print', 'menu __splashscreen',
                        'preheat'
                ]:
                    for j in self.defined_config.sections():
                        if j.startswith(i):
                            for k in list(self.config.sections()):
                                if k.startswith(i):
                                    del self.config[k]
                            break

                self.log_config(self.defined_config)
                self.config.read_string(user_def)
                if saved_def is not None:
                    self.config.read_string(saved_def)
                    logging.info(
                        "====== Saved Def ======\n%s\n======================="
                        % saved_def)
        except KeyError:
            raise ConfigError(f"Error reading config: {self.config_path}")
        except Exception:
            logging.exception("Unknown error with config")

        printers = sorted(
            [i for i in self.config.sections() if i.startswith("printer ")])
        self.printers = []
        for printer in printers:
            self.printers.append({
                printer[8:]: {
                    "moonraker_host":
                    self.config.get(printer,
                                    "moonraker_host",
                                    fallback="127.0.0.1"),
                    "moonraker_port":
                    self.config.get(printer, "moonraker_port",
                                    fallback="7125"),
                    "moonraker_api_key":
                    self.config.get(printer,
                                    "moonraker_api_key",
                                    fallback=False)
                }
            })
        if not printers:
            self.printers.append({
                "Printer": {
                    "moonraker_host":
                    self.config.get("main",
                                    "moonraker_host",
                                    fallback="127.0.0.1"),
                    "moonraker_port":
                    self.config.get("main", "moonraker_port", fallback="7125"),
                    "moonraker_api_key":
                    self.config.get("main", "moonraker_api_key", fallback="")
                }
            })

        conf_printers_debug = copy.deepcopy(self.printers)
        for printer in conf_printers_debug:
            name = list(printer)[0]
            item = conf_printers_debug[conf_printers_debug.index(printer)]
            if item[list(printer)[0]]['moonraker_api_key'] != "":
                item[list(printer)[0]]['moonraker_api_key'] = "redacted"
        logging.debug("Configured printers: %s" %
                      json.dumps(conf_printers_debug, indent=2))

        lang = self.get_main_config_option("language", None)
        lang = [lang] if lang is not None and lang != "default" else None
        logging.info("Detected language: %s" % lang)
        self.lang = gettext.translation('KlipperScreen',
                                        localedir='ks_includes/locales',
                                        languages=lang,
                                        fallback=True)

        self._create_configurable_options(screen)
Example #37
def create_or_update_users_and_groups(domain,
                                      user_specs,
                                      group_specs,
                                      task=None):
    from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
    custom_data_validator = UserFieldsView.get_validator(domain)
    ret = {"errors": [], "rows": []}
    total = len(user_specs) + len(group_specs)

    def _set_progress(progress):
        if task is not None:
            DownloadBase.set_progress(task, progress, total)

    group_memoizer = create_or_update_groups(domain, group_specs, log=ret)
    current = len(group_specs)

    usernames = set()
    user_ids = set()
    allowed_groups = set(group_memoizer.groups)
    allowed_group_names = [group.name for group in allowed_groups]
    allowed_roles = UserRole.by_domain(domain)
    roles_by_name = {role.name: role for role in allowed_roles}
    can_assign_locations = domain_has_privilege(domain, privileges.LOCATIONS)
    # ToDo: We need more speccing on what/how locations can be assigned if location-restrictions is enabled
    #       For now, don't support bulk assigning if location-restrictions are enabled
    can_assign_locations = can_assign_locations and not toggles.RESTRICT_WEB_USERS_BY_LOCATION.enabled(
        domain)
    if can_assign_locations:
        location_cache = SiteCodeToLocationCache(domain)
    project = Domain.get_by_name(domain)
    usernames_with_dupe_passwords = users_with_duplicate_passwords(user_specs)

    try:
        for row in user_specs:
            _set_progress(current)
            current += 1

            data = row.get('data')
            email = row.get('email')
            group_names = list(map(six.text_type, row.get('group') or []))
            language = row.get('language')
            name = row.get('name')
            password = row.get('password')
            phone_number = row.get('phone-number')
            uncategorized_data = row.get('uncategorized_data')
            user_id = row.get('user_id')
            username = row.get('username')
            location_codes = row.get('location_code') or []
            if location_codes and not isinstance(location_codes, list):
                location_codes = [location_codes]
            # ignore empty
            location_codes = [code for code in location_codes if code]
            role = row.get('role', '')

            if password:
                password = six.text_type(password)
            try:
                username = normalize_username(str(username), domain)
            except TypeError:
                username = None
            except ValidationError:
                ret['rows'].append({
                    'username': username,
                    'row': row,
                    'flag': _('username cannot contain spaces or symbols'),
                })
                continue
            status_row = {
                'username': raw_username(username) if username else None,
                'row': row,
            }

            is_active = row.get('is_active')
            if isinstance(is_active, six.string_types):
                try:
                    is_active = string_to_boolean(
                        is_active) if is_active else None
                except ValueError:
                    ret['rows'].append({
                        'username': username,
                        'row': row,
                        'flag': _("'is_active' column can only contain 'true' or 'false'"),
                    })
                    continue

            if username in usernames or user_id in user_ids:
                status_row['flag'] = 'repeat'
            elif not username and not user_id:
                status_row['flag'] = 'missing-data'
            else:
                try:
                    if username:
                        usernames.add(username)
                    if user_id:
                        user_ids.add(user_id)
                    if user_id:
                        user = CommCareUser.get_by_user_id(user_id, domain)
                    else:
                        user = CommCareUser.get_by_username(username)

                    if project.strong_mobile_passwords and is_password(
                            password):
                        if raw_username(
                                username) in usernames_with_dupe_passwords:
                            raise UserUploadError(
                                _("Provide a unique password for each mobile worker"
                                  ))

                        try:
                            clean_password(password)
                        except forms.ValidationError:
                            if settings.ENABLE_DRACONIAN_SECURITY_FEATURES:
                                msg = _(
                                    "Mobile Worker passwords must be 8 "
                                    "characters long with at least 1 capital "
                                    "letter, 1 special character and 1 number")
                            else:
                                msg = _("Please provide a stronger password")
                            raise UserUploadError(msg)

                    if user:
                        if user.domain != domain:
                            raise UserUploadError(
                                _('User with username %(username)r is '
                                  'somehow in domain %(domain)r') % {
                                      'username': user.username,
                                      'domain': user.domain
                                  })
                        if username and user.username != username:
                            raise UserUploadError(
                                _('Changing usernames is not supported: %(username)r to %(new_username)r'
                                  ) % {
                                      'username': user.username,
                                      'new_username': username
                                  })
                        if is_password(password):
                            user.set_password(password)
                        status_row['flag'] = 'updated'
                    else:
                        max_username_length = get_mobile_worker_max_username_length(
                            domain)
                        if len(raw_username(username)) > max_username_length:
                            ret['rows'].append({
                                'username': username,
                                'row': row,
                                'flag': _("username cannot contain greater than %d characters"
                                          % max_username_length),
                            })
                            continue
                        if not is_password(password):
                            raise UserUploadError(
                                _("Cannot create a new user with a blank password"
                                  ))
                        user = CommCareUser.create(domain,
                                                   username,
                                                   password,
                                                   commit=False)
                        status_row['flag'] = 'created'
                    if phone_number:
                        user.add_phone_number(_fmt_phone(phone_number),
                                              default=True)
                    if name:
                        user.set_full_name(six.text_type(name))
                    if data:
                        error = custom_data_validator(data)
                        if error:
                            raise UserUploadError(error)
                        user.user_data.update(data)
                    if uncategorized_data:
                        user.user_data.update(uncategorized_data)
                    if language:
                        user.language = language
                    if email:
                        try:
                            validate_email(email)
                        except ValidationError:
                            raise UserUploadError(
                                _("User has an invalid email address"))

                        user.email = email.lower()
                    if is_active is not None:
                        user.is_active = is_active

                    if can_assign_locations:
                        # Do this here so that we validate the location code before we
                        # save any other information to the user, this way either all of
                        # the user's information is updated, or none of it
                        location_ids = []
                        for code in location_codes:
                            loc = get_location_from_site_code(
                                code, location_cache)
                            location_ids.append(loc.location_id)

                    if role:
                        if role in roles_by_name:
                            user.set_role(
                                domain, roles_by_name[role].get_qualified_id())
                        else:
                            raise UserUploadError(
                                _("Role '%s' does not exist") % role)

                    # following blocks require user doc id, so it needs to be saved if new user
                    user.save()
                    if can_assign_locations:
                        locations_updated = set(
                            user.assigned_location_ids) != set(location_ids)
                        primary_location_removed = (
                            user.location_id and not location_ids
                            or user.location_id not in location_ids)

                        if primary_location_removed:
                            user.unset_location()
                        if locations_updated:
                            user.reset_locations(location_ids)

                    if is_password(password):
                        # Without this line, digest auth doesn't work.
                        # With this line, digest auth works.
                        # Other than that, I'm not sure what's going on
                        user.get_django_user().check_password(password)

                    for group_id in Group.by_user(user, wrap=False):
                        group = group_memoizer.get(group_id)
                        if group.name not in group_names:
                            group.remove_user(user)

                    for group_name in group_names:
                        if group_name not in allowed_group_names:
                            raise UserUploadError(
                                _("Can't add to group '%s' "
                                  "(try adding it to your spreadsheet)") %
                                group_name)
                        group_memoizer.by_name(group_name).add_user(user,
                                                                    save=False)

                except (UserUploadError, CouchUser.Inconsistent) as e:
                    status_row['flag'] = six.text_type(e)

            ret["rows"].append(status_row)
    finally:
        try:
            group_memoizer.save_all()
        except BulkSaveError as e:
            _error_message = (
                "Oops! We were not able to save some of your group changes. "
                "Please make sure no one else is editing your groups "
                "and try again.")
            logging.exception(('BulkSaveError saving groups. '
                               'User saw error message "%s". Errors: %s') %
                              (_error_message, e.errors))
            ret['errors'].append(_error_message)

    _set_progress(total)
    return ret
Example #38
def server_error(e):
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500
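For this handler to run it has to be registered with the web framework; with Flask (an assumption — this snippet does not show the app object) the wiring would be:

import logging

from flask import Flask

app = Flask(__name__)

@app.errorhandler(500)
def server_error(e):
    # Log the error and stacktrace.
    logging.exception('An error occurred during a request.')
    return 'An internal error occurred.', 500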
Example #39
	def call(self):
		try:
			self.func()
		except Exception:
			logging.exception("Exception raised during queued message")
Example #40
                else:
                    ckpt_path = get_best_checkpoint_path(args.run_dir)
                load_model_state(model, ckpt_path, args.cuda, skip_task_models=[], strict=strict)

                tasks = [task]
                if task.name == 'mnli':
                    tasks += [t for t in target_tasks if t.name == 'mnli-diagnostic']
                evaluate_and_write(args, model, tasks, splits_to_write)

        elif args.transfer_paradigm == "frozen":
            evaluate_and_write(args, model, target_tasks, splits_to_write)

    log.info("Done!")


if __name__ == '__main__':
    try:
        main(sys.argv[1:])
        if EMAIL_NOTIFIER is not None:
            EMAIL_NOTIFIER(body="Run completed successfully!", prefix="")
    except BaseException as e:
        # Make sure we log the trace for any crashes before exiting.
        log.exception("Fatal error in main():")
        if EMAIL_NOTIFIER is not None:
            import traceback
            tb_lines = traceback.format_exception(*sys.exc_info())
            EMAIL_NOTIFIER(body="".join(tb_lines), prefix="FAILED")
        raise e  # re-raise exception, in case debugger is attached.
    sys.exit(0)
Example #41
def process_vos(options, vo_ids, storage_name, client, datestamp, host_institute=None):
    """Process the virtual organisations.

    - make the fileset per VO
    - set the quota for the complete fileset
    - set the quota on a per-user basis for all VO members
    """

    listm = Monoid([], lambda xs, ys: xs + ys)
    ok_vos = MonoidDict(copy.deepcopy(listm))
    error_vos = MonoidDict(copy.deepcopy(listm))

    for vo_id in sorted(vo_ids):

        vo = VscTier2AccountpageVo(vo_id, rest_client=client)
        vo.dry_run = options.dry_run

        try:
            if storage_name in [VSC_HOME]:
                continue

            if storage_name in [VSC_DATA] and vo_id not in INSTITUTE_VOS_GENT.values():
                vo.create_data_fileset()
                vo.set_data_quota()
                update_vo_status(vo, client)

            if storage_name in [VSC_DATA_SHARED] and vo_id not in INSTITUTE_VOS_GENT.values() and vo.data_sharing:
                vo.create_data_shared_fileset()
                vo.set_data_shared_quota()

            if vo_id == INSTITUTE_VOS_GENT[GENT]:
                logging.info("Not deploying default VO %s members" % (vo_id,))
                continue

            if storage_name in GENT_PRODUCTION_SCRATCH:
                vo.create_scratch_fileset(storage_name)
                vo.set_scratch_quota(storage_name)

            if vo_id in INSTITUTE_VOS_GENT.values() and storage_name in (VSC_HOME, VSC_DATA):
                logging.info("Not deploying default VO %s members on %s", vo_id, storage_name)
                continue

            modified_member_list = client.vo[vo.vo_id].member.modified[datestamp].get()
            factory = lambda vid: VscTier2AccountpageUser(vid,
                                                          rest_client=client,
                                                          host_institute=host_institute,
                                                          use_user_cache=True)
            modified_members = [factory(a["vsc_id"]) for a in modified_member_list[1]]

            for member in modified_members:
                try:
                    member.dry_run = options.dry_run
                    if storage_name in [VSC_DATA]:
                        vo.set_member_data_quota(member)  # half of the VO quota
                        vo.create_member_data_dir(member)

                    if storage_name in GENT_PRODUCTION_SCRATCH:
                        vo.set_member_scratch_quota(storage_name, member)  # half of the VO quota
                        vo.create_member_scratch_dir(storage_name, member)

                    ok_vos[vo.vo_id] = [member.account.vsc_id]
                except Exception:
                    logging.exception("Failure at setting up the member %s of VO %s on %s" %
                                      (member.account.vsc_id, vo.vo_id, storage_name))
                    error_vos[vo.vo_id] = [member.account.vsc_id]
        except Exception:
            logging.exception("Something went wrong setting up the VO %s on the storage %s" % (vo.vo_id, storage_name))
            error_vos[vo.vo_id] = vo.members

    return (ok_vos, error_vos)
Example #42
	def wrapper(*args, **kwargs):
		try:
			func(*args, **kwargs)
		except Exception:
			logging.exception("Exception raised in event handler")
Example #43
    if options.category:
        category = options.category

    if options.repository:
        repository = options.repository

    if options.project:
        project = options.project

    if options.codebase:
        codebase = options.codebase

    if options.username:
        username = options.username

    if options.auth:
        auth = options.auth

    if options.encoding:
        encoding = options.encoding

    if options.first_parent:
        first_parent = options.first_parent

    process_changes()
    send_changes()
except Exception:
    logging.exception("Unhandled exception")
    sys.exit(1)
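Example #43 is an excerpt: the try that the final except closes sits above the quoted region. The overall shape is a top-level guard that logs any unhandled exception before exiting non-zero. A minimal self-contained sketch of the same pattern, where run() is a hypothetical entry point standing in for the elided body:

import logging
import sys

def run():
    # Hypothetical entry point standing in for the elided script body.
    raise RuntimeError("something broke")

if __name__ == '__main__':
    try:
        run()
    except Exception:
        # Log the full traceback, then exit non-zero so callers see the failure.
        logging.exception("Unhandled exception")
        sys.exit(1)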
Example #44
    __table_args__ = {
        'mysql_engine': 'InnoDB',
        'mysql_charset': 'utf8'
    }

    index = Column(Integer, nullable=False, primary_key=True)
    username = Column(String(100), nullable=False)
    email = Column(String(100), nullable=False)
    asin = Column(String(20), nullable=False)
    stars = Column(Float, nullable=False)
    product_at_page = Column(Integer, nullable=False)
    product_index = Column(String(20), nullable=False)
    keyword = Column(String(100), nullable=False)

def db_connect():
    return create_engine(URL(**DB_SETTING), pool_size=10, pool_recycle=3600, max_overflow=20)

if __name__ == '__main__':
    logger = logging.getLogger('')
    hdlr = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    logger.addHandler(hdlr)
    logger.setLevel(logging.DEBUG)

    try:
        Base.metadata.create_all(db_connect())
    except Exception:
        logging.exception('Create Failed !')
    else:
        logging.info('Created !')
Example #45
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true', default=False)
    parser.add_argument('--logfile', type=str,
                        help='If specified will log to destination instead of stdout')
    parser.add_argument('--logstash-host', type=str,
                        help='Format: localhost:12345')
    parser.add_argument('--type', type=str, default='cloudtrail',
                        help='Logstash type attribute to set')

    subparser = parser.add_subparsers(title='subcommands', dest="subcommand")

    monitor_parser = subparser.add_parser('monitor',
                                          help='listen for new cloudtrail events')
    monitor_parser.add_argument('--queue-name', type=str, required=True,
                                help='The SQS name to listen for events')
    monitor_parser.add_argument('--num-messages', type=int, default=1,
                                help='Number of items to fetch off queue')
    monitor_parser.set_defaults(func=monitor)

    backfill_parser = subparser.add_parser(
        'backfill', description='Backfills cloudtrail logs from the date range specified in --start and --end.  The range is [start, end)',
        help='backfill old cloudtrail events from s3 bucket')
    backfill_parser.add_argument('--prefix', type=str, default='',
                                 help='Prefix for S3 bucket set during cloudtrail setup')
    backfill_parser.add_argument('--bucket', type=str, required=True,
                                 help='S3 bucket where cloudtrail logs are stored')
    backfill_parser.add_argument('--region', type=str,
                                 help='Filter logs only from this region')
    backfill_parser.add_argument('--account-id', type=str,
                                 help='Filter logs for only this account id. Useful for cases where you aggregate multiple accounts into one cloudtrail bucket.  Default behavior is to iterate over all account ids found.')
    backfill_parser.add_argument('--start', type=str, required=True,
                                 help='Starting date in format %%Y%%m%%d, eg 20141021')
    backfill_parser.add_argument('--end', type=str, required=True,
                                 help='Ending date in format %%Y%%m%%d, eg 20141022. Note ending date is exclusive')
    backfill_parser.set_defaults(func=backfill)

    args = parser.parse_args()

    if args.logstash_host and ':' not in args.logstash_host:
        logging.error('logstash host format is hostname:port')
        return -1

    if args.debug:
        level = logging.DEBUG
    else:
        level = logging.INFO

    # basicConfig() has no 'console' keyword and only configures the root
    # logger on its first call, so choose the destination up front.
    log_format = '%(asctime)s - %(levelname)s - %(module)s - %(message)s'
    if args.logfile:
        logging.basicConfig(level=level, format=log_format,
                            filename=args.logfile)
    else:
        logging.basicConfig(level=level, format=log_format)

    # Test connection to logstash first
    logstash_host = args.logstash_host.split(':')

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        s.connect((logstash_host[0], int(logstash_host[1])))
    except Exception:
        logging.exception('Error connecting to logstash server')
        return -1
    finally:
        s.close()  # the socket was only a connectivity probe

    args.func(args)
Example #46
 def _wrapped(*args, **kwargs):
     try:
         return func(*args, **kwargs)
     except Exception:
         logging.exception("@debug")
Example #47
#
# logging.basicConfig(format='%(process)d-%(levelname)s-%(message)s')
# logging.warning('This is a Warning')
#
# logging.basicConfig(format='%(asctime)s - %(message)s', level=logging.INFO)
# logging.info('Admin logged in')
#
# logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%d-%b-%y %H:%M:%S')
# logging.warning('Admin logged out')
#
# for n in range(1,5):
#     logging.warning(f"{n}")

# import logging
#
a = 5
b = 0

try:
    c = a / b
except Exception:
    logging.exception("Key Error")
print("program execute...")
#

logging.debug("This is a debug message")
logging.info("This is an info message")
logging.warning("This is a warning message")
logging.error("This is an error message")
logging.critical("This is a critical message")
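Example #47 relies on two details of the logging module that are easy to miss: logging.exception() logs at ERROR level and appends the current traceback, so it is equivalent to logging.error(msg, exc_info=True) and only makes sense inside an except block; and without a prior basicConfig() call the root logger's effective level is WARNING, so the debug and info calls above produce no output. A short demonstration:

import logging

logging.basicConfig(level=logging.DEBUG)  # without this, DEBUG/INFO messages are dropped

try:
    1 / 0
except Exception:
    logging.exception("division failed")             # ERROR level plus traceback
    logging.error("division failed", exc_info=True)  # equivalent output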
Example #48
    # Disable SSL warnings: https://urllib3.readthedocs.io/en/latest/advanced-usage.html#ssl-warnings
    urllib3.disable_warnings()

    if 'KUBERNETES_PORT' in os.environ:
        kubernetes.config.load_incluster_config()
    else:
        kubernetes.config.load_kube_config()
    k8s_client = kubernetes.client.api_client.ApiClient(
        kubernetes.client.Configuration())
    dyn_client = openshift.dynamic.DynamicClient(k8s_client)

    v1_persistent_volume = dyn_client.resources.get(api_version='v1',
                                                    kind='PersistentVolume')

    interval = int(os.getenv('GLUSTER_METRICS_INTERVAL', '300'))
    prometheus_client.start_http_server(8080)
    while True:
        try:
            pvs = v1_persistent_volume.get().items
            pvcs = {
                pv['spec']['glusterfs']['path']: {
                    'namespace': pv['spec']['claimRef']['namespace'],
                    'name': pv['spec']['claimRef']['name']
                }
                for pv in pvs if pv.spec.glusterfs
            }
            collect_gluster_metrics()
        except Exception:
            logging.exception("Failed to collect gluster metrics")
        time.sleep(interval)
Example #49
def api(accessor=''):
    """
    The function that serves up all the metrics. Given some path/to/a/metric it will
    retrieve the metric and do the necessary walking of the tree.

    :param accessor: The path/to/the/desired/metric
    :type accessor: unicode
    :rtype: flask.Response
    """

    # Setup sane/safe arguments for actually getting the data. We take in all
    # arguments that were passed via GET/POST. If they passed a config variable
    # we clobber it, as we trust what is in the config.
    sane_args = {}
    for value in request.values:
        sane_args[value] = request.args.getlist(value)

    # Set the full requested path
    full_path = request.path

    # Set the accessor and variables
    sane_args['debug'] = request.args.get('debug', False)
    sane_args['remote_addr'] = request.remote_addr
    sane_args['accessor'] = accessor

    # Add config to sane_args
    config = listener.config['iconfig']
    sane_args['config'] = config

    # Check if we are running a check or not
    if 'check' not in sane_args:
        sane_args['check'] = request.args.get('check', False)

    # Try to get the node that was specified
    try:
        node = psapi.getter(accessor, config, full_path, request.args)
    except ValueError as exc:
        logging.exception(exc)
        return error(msg='Referencing node that does not exist: %s' % accessor)
    except IndexError:
        # Hide the actual exception and just show nice output to users about changes in the API functionality
        return error(
            msg=
            'Could not access location specified. Changes to API calls were made in NCPA v1.7, check documentation on making API calls.'
        )

    # Check for default unit in the config values
    default_units = get_config_value('general', 'default_units')
    if default_units:
        if 'units' not in sane_args:
            sane_args['units'] = default_units

    if sane_args['check']:
        value = node.run_check(**sane_args)
    else:
        value = node.walk(**sane_args)

    # Generate page and add cross-domain loading
    json_data = json.dumps(dict(value),
                           ensure_ascii=False,
                           indent=None if request.is_xhr else 4)
    response = Response(json_data, mimetype='application/json')
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response
Example #50
def main():

    global logfile

    AP_ESCAPE       = "Escape character is '^]'."
    AP_USERNAME     = "******"
    AP_PASSWORD     = "******"
    AP_EN           = "en"
    AP_MORE         = "--More--"
    AP_EXIT         = "exit"
    LF_PROMPT       = "$"
    CR = "\r\n"

    
    parser = argparse.ArgumentParser(description="Cisco AP Control Script")
    parser.add_argument("-a", "--prompt",  type=str, help="ap prompt")
    parser.add_argument("-d", "--dest",    type=str, help="address of the AP  172.19.27.55")
    parser.add_argument("-o", "--port",    type=int, help="control port on the AP, 2008")
    parser.add_argument("-u", "--user",    type=str, help="credential login/username, admin")
    parser.add_argument("-p", "--passwd",  type=str, help="credential password Wnbulab@123")
    parser.add_argument("-s", "--scheme",  type=str, choices=["serial", "ssh", "telnet"], help="Connect via serial, ssh or telnet")
    parser.add_argument("-t", "--tty",     type=str, help="tty serial device for connecting to AP")
    parser.add_argument("-l", "--log",     type=str, help="logfile for messages, stdout means output to console",default="stdout")
    parser.add_argument("-z", "--action",  type=str, help="action,  current action is powercfg")
    parser.add_argument("-b", "--baud",    type=str, help="action,  baud rate lanforge: 115200  cisco: 9600")

    args = None
    try:
        args = parser.parse_args()
        host = args.dest
        scheme = args.scheme
        port = args.port if args.port is not None else default_ports[scheme]
        user = args.user
        if args.log is not None:
            logfile = args.log
    except Exception as e:
        logging.exception(e)
        usage()
        exit(2)
    console_handler = logging.StreamHandler()
    formatter = logging.Formatter(FORMAT)
    logg = logging.getLogger(__name__)
    logg.setLevel(logging.DEBUG)
    file_handler = None
    if (logfile is not None):
        if (logfile != "stdout"):
            file_handler = logging.FileHandler(logfile, "w")
            file_handler.setLevel(logging.DEBUG)
            file_handler.setFormatter(formatter)
            logg.addHandler(file_handler)
            logging.basicConfig(format=FORMAT, handlers=[file_handler])
        else:
            # stdout logging
            logging.basicConfig(format=FORMAT, handlers=[console_handler])
    egg = None # think "eggpect"
    ser = None
    try:
        if (scheme == "serial"):
            #eggspect = pexpect.fdpexpect.fdspan(telcon, logfile=sys.stdout.buffer)
            ser = serial.Serial(args.tty, int(args.baud), timeout=5)
            print("Created serial connection on %s, open: %s"%(args.tty, ser.is_open))
            egg = SerialSpawn(ser)
            egg.logfile = FileAdapter(logg)
            time.sleep(1)
            egg.sendline(CR)
            time.sleep(1)

        elif (scheme == "ssh"):
            if (port is None):
                port = 22
            cmd = "ssh -p%d %s@%s"%(port, user, host)
            logg.info("Spawn: "+cmd+NL)
            egg = pexpect.spawn(cmd)
            #egg.logfile_read = sys.stdout.buffer
            egg.logfile = FileAdapter(logg)
        elif (scheme == "telnet"):
            if (port is None):
                port = 23
            cmd = "telnet {} {}".format(host, port)
            logg.info("Spawn: "+cmd+NL)
            egg = pexpect.spawn(cmd)
            egg.logfile = FileAdapter(logg)
            # Will login below as needed.
        else:
            usage()
            exit(1)
    except Exception as e:
        logging.exception(e)
    
    AP_PROMPT       = "{}>".format(args.prompt)
    AP_HASH         = "{}#".format(args.prompt)
    time.sleep(0.1)
    logged_in  = False
    loop_count = 0
    while loop_count <= 8 and not logged_in:
        loop_count += 1
        i = egg.expect_exact([AP_ESCAPE,AP_PROMPT,AP_HASH,AP_USERNAME,AP_PASSWORD,AP_MORE,LF_PROMPT,pexpect.TIMEOUT],timeout=5)
        if i == 0:
            logg.info("Expect: {} i: {} before: {} after: {}".format(AP_ESCAPE,i,egg.before,egg.after))
            egg.sendline(CR) # Needed after Escape or should just do timeout and then a CR?
            sleep(1)
        if i == 1:
            logg.info("Expect: {} i: {} before: {} after: {}".format(AP_PROMPT,i,egg.before,egg.after))
            egg.sendline(AP_EN) 
            sleep(1)
            j = egg.expect_exact([AP_PASSWORD,pexpect.TIMEOUT],timeout=5)
            if j == 0:
                logg.info("Expect: {} i: {} j: {} before: {} after: {}".format(AP_PASSWORD,i,j,egg.before,egg.after))
                egg.sendline(args.passwd) 
                sleep(1)
                k = egg.expect_exact([AP_HASH,pexpect.TIMEOUT],timeout=5)
                if k == 0:
                    logg.info("Expect: {} i: {} j: {} k: {} before: {} after: {}".format(AP_PASSWORD,i,j,k,egg.before,egg.after))
                    logged_in = True
                if k == 1:
                    logg.info("Expect: {} i: {} j: {} k: {} before: {} after: {}".format("Timeout",i,j,k,egg.before,egg.after))
            if j == 1:
                logg.info("Expect: {} i: {} j: {} before: {} after: {}".format("Timeout",i,j,egg.before,egg.after))

        if i == 2:
            logg.info("Expect: {} i: {} before: {} after: {}".format(AP_HASH,i,egg.before,egg.after))
            logged_in = True 
            sleep(1)
        if i == 3:
            logg.info("Expect: {} i: {} before: {} after: {}".format(AP_USERNAME,i,egg.before,egg.after))
            egg.sendline(args.user) 
            sleep(1)
        if i == 4:
            logg.info("Expect: {} i: {} before: {} after: {}".format(AP_PASSWORD,i,egg.before,egg.after))
            egg.sendline(args.passwd) 
            sleep(1)
        if i == 5:
            logg.info("Expect: {} i: {} before: {} after: {}".format(AP_MORE,i,egg.before,egg.after))
            if (scheme == "serial"):
                egg.sendline("r")
            else:
                egg.sendcontrol('c')
            sleep(1)
        # for Testing serial connection using Lanforge
        if i == 6:
            logg.info("Expect: {} i: {} before: {} after: {}".format(LF_PROMPT,i,egg.before.decode('utf-8', 'ignore'),egg.after.decode('utf-8', 'ignore')))
            if (loop_count < 3):
                egg.send("ls -lrt")
                sleep(1)
            if (loop_count > 4):
                logged_in = True # basically a test mode using lanforge serial
        if i == 7:
            logg.info("Expect: {} i: {} before: {} after: {}".format("Timeout",i,egg.before,egg.after))
            egg.sendline(CR) 
            sleep(1)


    if (args.action == "powercfg"):
        logg.info("execute: show controllers dot11Radio 1 powercfg | g T1")
        egg.sendline('show controllers dot11Radio 1 powercfg | g T1')
        egg.expect([pexpect.TIMEOUT], timeout=3)  # do not delete this for it allows for subprocess to see output
        print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        i = egg.expect_exact([AP_MORE,pexpect.TIMEOUT],timeout=5)
        if i == 0:
            egg.sendcontrol('c')
        if i == 1:
            logg.info("send cntl c anyway")
            egg.sendcontrol('c')

    elif (args.action == "clear_log"):
        logg.info("execute: clear log")
        egg.sendline('clear log')
        sleep(0.4)
        egg.sendline('show log')
        egg.expect([pexpect.TIMEOUT], timeout=2)  # do not delete this for it allows for subprocess to see output
        print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        # allow for normal logout below

    elif (args.action == "show_log"):
        logg.info("execute: show log")
        egg.sendline('show log')
        sleep(0.4)
        egg.expect([pexpect.TIMEOUT], timeout=2)  # do not delete this for it allows for subprocess to see output
        print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        i = egg.expect_exact([AP_MORE,pexpect.TIMEOUT],timeout=4)
        if i == 0:
            egg.sendline('r')
            egg.expect([pexpect.TIMEOUT], timeout=4)  # do not delete this for it allows for subprocess to see output
            print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        if i == 1:
            print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        # allow for normal logout below
        # show log | g DOT11_DRV

    # CAC_EXPIRY_EVT: CAC finished on DFS channel 52
    elif (args.action == "cac_expiry_evt"):
        logg.info("execute: show log | g CAC_EXPIRY_EVT")    
        egg.sendline('show log | g CAC_EXPIRY_EVT')
        sleep(0.4)
        egg.expect([pexpect.TIMEOUT], timeout=2)  # do not delete this for it allows for subprocess to see output
        print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        i = egg.expect_exact([AP_MORE,pexpect.TIMEOUT],timeout=4)
        if i == 0:
            egg.sendline('r')
            egg.expect([pexpect.TIMEOUT], timeout=4)  # do not delete this for it allows for subprocess to see output
            print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output
        if i == 1:
            print(egg.before.decode('utf-8', 'ignore')) # do not delete this for it  allows for subprocess to see output

    elif (args.action == "ds_data_5ghz"):
        logg.info("execute: wl -i wl1 bs_data")
        egg.sendline('wl -i wl1 bs_data')
        egg.expect([pexpect.TIMEOUT], timeout=4) # do not delete this, it allows the subprocess to read output
        print(egg.before.decode('utf-8','ignore')) # do not delete this, it allows the subprocess to see output


    elif (args.action == "ds_data_24ghz"):
        logg.info("execute: wl -i wl0 bs_data")
        egg.sendline('wl -i wl1 bs_data')
        egg.expect([pexpect.TIMEOUT], timeout=4) # do not detete this for it allow for subprocess to read
        print(egg.before.decode('utf-8','ignore')) # do not delete this for it  allows for subprocess to see output


    else: # no other command at this time so send the same power command
        #logg.info("no action so execute: show controllers dot11Radio 1 powercfg | g T1")
        logg.info("no action")

    i = egg.expect_exact([AP_PROMPT,AP_HASH,pexpect.TIMEOUT],timeout=1)
    if i == 0:
        logg.info("received {} we are done send exit".format(AP_PROMPT))
        egg.sendline(AP_EXIT)
    if i == 1:
        logg.info("received {} send exit".format(AP_HASH))
        egg.sendline(AP_EXIT)
    if i == 2:
        logg.info("timed out waiting for {} or {}".format(AP_PROMPT,AP_HASH))
Example #51
def app_validate(resource,appkernel,nnodes,verbose=False):
    globals()['verbose']=verbose
    resource_name=resource
    app_name=appkernel
    
        
    errorCount=0
    warningCount=0
        
    log.info("Validating "+app_name+" application kernel installation on "+resource_name)
    
    from akrr import get_akrr_dirs
    
    akrr_dirs=get_akrr_dirs()
    
    default_resource_param_filename=os.path.abspath(os.path.join(akrr_dirs['default_dir'],"default.resource.conf"))
    resource_param_filename=os.path.abspath(os.path.join(akrr_dirs['cfg_dir'],"resources",resource_name,"resource.conf"))
    
    default_app_param_filename=os.path.abspath(os.path.join(akrr_dirs['default_dir'],"default.app.conf"))
    app_ker_param_filename=os.path.abspath(os.path.join(akrr_dirs['default_dir'],app_name+".app.conf"))
    ###############################################################################################
    #validating resource parameter file
    
    log.info("#"*80)
    log.info("Validating %s parameters from %s"%(resource_name,resource_param_filename))
    
    if not os.path.isfile(resource_param_filename):
        log.error("resource parameters file (%s) do not exists!"%(resource_param_filename,))
        exit()
    
    #check syntax
    try:
        tmp={}
        exec(compile(open(default_resource_param_filename).read(), default_resource_param_filename, 'exec'),tmp)
        exec(compile(open(resource_param_filename).read(), resource_param_filename, 'exec'),tmp)
    except Exception:
        log.exception("Can not load resource from """+resource_param_filename+"\n"+
               "Probably invalid syntax.")
        exit(1)
    #check syntax
    try:
        tmp={}
        exec(compile(open(default_app_param_filename).read(), default_app_param_filename, 'exec'),tmp)
        exec(compile(open(app_ker_param_filename).read(), app_ker_param_filename, 'exec'),tmp)
    except Exception:
        log.exception("Can not load application kernel from """+app_ker_param_filename+"\n"+
               "Probably invalid syntax")
        exit(1)
    
    #now we can load akrr
    from . import cfg
    from . import akrrrestclient
    from .resource_deploy import makeResultsSummary

    resource=cfg.FindResourceByName(resource_name)
    log.info("Syntax of %s is correct and all necessary parameters are present."%resource_param_filename,highlight="ok")
    
    app=cfg.FindAppByName(app_name)
    #check the presence of runScript[resource]
    #if resource_name not in app['runScript'] and 'default' not in app['runScript']:
    #    logerr("Can not load application kernel from """+app_ker_param_filename+"\n"+
    #           "runScript['%s'] is not set"%(resource_name,))
    #    exit()
    log.info("Syntax of %s is correct and all necessary parameters are present."%app_ker_param_filename,highlight="ok")
    
    #check if AK is in DB
    if True:
        #add entry to mod_appkernel.resource
        dbAK,curAK=cfg.getAKDB(True)
            
        curAK.execute('''SELECT * FROM app_kernel_def WHERE ak_base_name=%s''', (app_name,))
        ak_in_AKDB = curAK.fetchall()
        if len(ak_in_AKDB)==0:
            curAK.execute('''INSERT INTO app_kernel_def (name,ak_base_name,processor_unit,enabled, description, visible)
                        VALUES(%s,%s,'node',0,%s,0);''',
                        (app_name,app_name,app_name))
            dbAK.commit()
        curAK.execute('''SELECT * FROM app_kernel_def WHERE ak_base_name=%s''', (app_name,))
        ak_in_AKDB = curAK.fetchall()[0]
        #add entry to mod_akrr.resource
        db,cur=cfg.getDB(True)
            
        cur.execute('''SELECT * FROM app_kernels WHERE name=%s''', (app_name,))
        ak_in_DB = cur.fetchall()
        if len(ak_in_DB)==0:
            cur.execute('''INSERT INTO app_kernels (id,name,enabled,nodes_list)
                        VALUES(%s,%s,0,'1,2,4,8');''',
                        (ak_in_AKDB['ak_def_id'],app_name))
            db.commit()
            
    ###############################################################################################
    #connect to resource
    log.info("#"*80)
    log.info("Validating resource accessibility. Connecting to %s."%(resource['name']))
    if resource['sshPrivateKeyFile'] is not None and not os.path.isfile(resource['sshPrivateKeyFile']):
        log.error("Cannot access ssh private key (%s)"%(resource['sshPrivateKeyFile'],))
        exit()
    
    str_io=io.StringIO()
    try:
        sys.stdout = sys.stderr = str_io
        rsh=cfg.sshResource(resource)
        
        sys.stdout=sys.__stdout__
        sys.stderr=sys.__stderr__
    except Exception as e:
        msg2=str_io.getvalue()
        msg2+="\n"+traceback.format_exc()
        sys.stdout=sys.__stdout__
        sys.stderr=sys.__stderr__
        log.error("Can not connect to """+resource['name']+"\n"+
               "Probably invalid credential, see full error report below",msg2)
        exit()
    print("="*80)
    log.info("Successfully connected to %s\n\n"%(resource['name']),highlight="ok")
    
    ###############################################################################################
    log.info("Checking directory locations\n")
    
    d=resource['akrrData']
    log.info("Checking: %s:%s"%(resource['remoteAccessNode'],d))
    status,msg=CheckDir(rsh, d,exitOnFail=True,tryToCreate=True)
    log.info(msg+"\n",highlight="ok")
    
    d=resource['appKerDir']
    log.info("Checking: %s:%s"%(resource['remoteAccessNode'],d))
    status,msg=CheckDir(rsh, d,exitOnFail=True,tryToCreate=True)
    log.info(msg+"\n",highlight="ok")
    
    d=resource['networkScratch']
    log.info("Checking: %s:%s"%(resource['remoteAccessNode'],d))
    status,msg=CheckDir(rsh, d,exitOnFail=False,tryToCreate=False)
    if status:
        log.info(msg,highlight="ok")
    else:
        log.info(msg,highlight="warning")
        log.info("WARNING %d: network scratch might have a different location on the head node; if that is by design it is ok"%(warningCount+1),highlight="warning")
        warningCount+=1
    log.info("")
    
    d=resource['localScratch']
    log.info("Checking: %s:%s"%(resource['remoteAccessNode'],d))
    status,msg=CheckDir(rsh, d,exitOnFail=False,tryToCreate=False)
    if status:
        log.info(msg,highlight="ok")
    else:
        log.info(msg,highlight="warning")
        log.info("WARNING %d: local scratch might have a different location on the head node; if that is by design it is ok"%(warningCount+1),highlight="warning")
        warningCount+=1
    log.info("")
    
    
    #close connection we don't need it any more
    rsh.close(force=True)
    del rsh    
    ###############################################################################################
    #send test job to queue
    
    log.info("#"*80)
    log.info("Will send test job to queue, wait till it executed and will analyze the output")
    
    print("Will use AKRR REST API at",akrrrestclient.restapi_host)
    #get check connection 
    try:
        r = akrrrestclient.get('/scheduled_tasks')
        if r.status_code!=200:
            log.error("Can not get token for AKRR REST API ( """+akrrrestclient.restapi_host+" )\n"+
               "See server response below",json.dumps(r.json(),indent=4))
            exit()
    except Exception as e:
        log.error("Can not connect to AKRR REST API ( """+akrrrestclient.restapi_host+" )\n"+
               "Is it running?\n"+
               "See full error report below",traceback.format_exc())
        exit()
    
    #check if the test job is already submitted
    task_id=None
    test_job_lock_filename=os.path.join(cfg.data_dir, resource_name + "_" + app_name + "_test_task.dat")
    if os.path.isfile(test_job_lock_filename):
        fin=open(test_job_lock_filename,"r")
        task_id=int(fin.readline())
        fin.close()
        
        r = akrrrestclient.get('/tasks/'+str(task_id))
        if r.status_code!=200:
            task_id=None
        else:
            log.info("\nWARNING %d: Seems this is rerun of this script, will monitor task with task_id = "%(warningCount+1)+str(task_id),highlight="warning")
            log.info("To submit new task delete "+test_job_lock_filename+"\n",highlight="warning")
            warningCount+=1
        #check how old is it
    #submit test job
    if task_id is None:
        try:
            payload={'resource':resource_name,
                     'app':app_name,
                     'resource_param':"{'nnodes':%d}"%nnodes,
                     'task_param':"{'test_run':True}"
                     }
            r = akrrrestclient.post('/scheduled_tasks', data=payload)
            if r.status_code!=200:
                log.error("Can not submit task through AKRR REST API ( """+akrrrestclient.restapi_host+" )\n"+
                   "See server response below",json.dumps(r.json(),indent=4))
                exit()
            task_id=r.json()['data']['task_id']
        except Exception as e:
            log.error("Can not submit task through AKRR REST API ( """+akrrrestclient.restapi_host+" )\n"+
                   "Is it still running?\n"+
                   "See full error report below",traceback.format_exc())
            exit()
        # write file with task_id
        fout=open(os.path.join(test_job_lock_filename),"w")
        print(task_id, file=fout)
        fout.close()
        log.info("\nSubmitted test job to AKRR, task_id is "+str(task_id)+"\n")
    #now wait till job is done
    msg_body0=""
    msg_body=""
    
    #response_json0={}
    #response_json=r.json()
    while True:
        t=datetime.datetime.now()
        #try:
        r = akrrrestclient.get('/tasks/'+str(task_id))
        
        response_json=r.json()
        if r.status_code==200:
            response_json=r.json()
            
            msg_body="="*80
            msg_body+="\nTast status:\n"
                        
            if response_json["data"]["queue"]=="scheduled_tasks":
                msg_body+="Task is in scheduled_tasks queue.\n"
                msg_body+="It schedule to be started on"+response_json["data"]["data"]['time_to_start']+"\n"
            elif response_json["data"]["queue"]=="active_tasks":
                msg_body+="Task is in active_tasks queue.\n"
                msg_body+="Status: "+str(response_json["data"]["data"]['status'])+"\n"
                msg_body+="Status info:\n"+str(response_json["data"]["data"]['statusinfo'])+"\n"
            elif response_json["data"]["queue"]=="completed_tasks":
                msg_body+="Task is completed!\n"
                completed_tasks=r.json()['data']['data']['completed_tasks']
                akrr_xdmod_instanceinfo=r.json()['data']['data']['akrr_xdmod_instanceinfo']
                akrr_errmsg=r.json()['data']['data']['akrr_errmsg']
                if verbose:
                    msg_body+="completed_tasks table entry:\n"+pp.pformat(completed_tasks)+"\n"
                    msg_body+="akrr_xdmod_instanceinfo table entry:\n"+pp.pformat(akrr_xdmod_instanceinfo)+"\n"
                    msg_body+='output parsing results:\n'+akrr_xdmod_instanceinfo['body']+"\n"
                else:
                    msg_body+="\tstatus: "+str(akrr_xdmod_instanceinfo['status'])+"\n"
                    if akrr_xdmod_instanceinfo['status']==0:
                        msg_body+="\tstatus2: "+completed_tasks['status']+"\n"
                    msg_body+="\tstatusinfo: "+completed_tasks['statusinfo']+"\n"
            else:
                msg_body+=r.text+"\n"
            
            tail_msg="time: "+t.strftime("%Y-%m-%d %H:%M:%S")
            
            if msg_body!=msg_body0:
                print("\n\n"+msg_body)
                print(tail_msg, end=' ')
                sys.stdout.flush()
            else:
                print("\r"+tail_msg, end=' ')
                sys.stdout.flush()
                
            msg_body0=copy.deepcopy(msg_body)
            
            if response_json["data"]["queue"]=="completed_tasks":
                break
        #try to update:
        try:
            payload={'next_check_time':''}
            r = akrrrestclient.put('/active_tasks/'+str(task_id), data=payload)
        except Exception:
            pass
        time.sleep(5)
    ###############################################################################################
    #analysing the output
    log.info("\n\nTest job is completed analyzing output\n",highlight="ok")
    r = akrrrestclient.get('/tasks/'+str(task_id))
    if r.status_code!=200:
        log.error("Can not get information about task\n"+
                   "See full error report below",
                   "AKRR server response:\n"+r.text)
        exit()
    completed_tasks=r.json()['data']['data']['completed_tasks']
    akrr_xdmod_instanceinfo=r.json()['data']['data']['akrr_xdmod_instanceinfo']
    akrr_errmsg=r.json()['data']['data']['akrr_errmsg']
    
    results_summary=makeResultsSummary(verbose,resource_name,app_name,completed_tasks,akrr_xdmod_instanceinfo,akrr_errmsg)
    #execution was not successful
    if completed_tasks['status'].count("ERROR")>0:
        if completed_tasks['status'].count("ERROR Can not created batch job script and submit it to remote queue")>0:
            log.error("Can not created batch job script and/or submit it to remote queue\n"+
                   "See full error report below",
                   results_summary)
            os.remove(test_job_lock_filename)
            exit()
        else:
            log.error(completed_tasks['status']+"\n"+
                   "See full error report below",
                   results_summary)
            os.remove(test_job_lock_filename)
            exit()
    
    #execution was not successful
    if akrr_xdmod_instanceinfo['status']==0:
        log.error("Task execution was not successful\n"+
                   "See full error report below",
                   results_summary)
        os.remove(test_job_lock_filename)
        exit()
    #see what is in report
    elm_perf = ET.fromstring(akrr_xdmod_instanceinfo['body'])
    elm_parameters=elm_perf.find('benchmark').find('parameters')
    elm_statistics=elm_perf.find('benchmark').find('statistics')
    
    log.info("\nTest kernel execution summary:",highlight="ok")
    print(results_summary)
    print() 
    #log.info("\nThe output looks good.\n",highlight="ok")
    if errorCount == 0:
        #enabling resource for execution
        log.info("\nEnabling %s on %s for execution\n"%(app_name,resource_name),highlight="ok")
        try:
            result = akrrrestclient.put(
                '/resources/%s/on'%(resource_name,),
                data={'application':app_name})
            if result.status_code == 200:
                log.info("Successfully enabled %s on %s"%(app_name,resource_name))
            else:
                if result is not None:
                    log.error("Can not turn-on %s on %s"%(app_name,resource_name),result.text)
                else:
                    log.error("Can not turn-on %s on %s"%(app_name,resource_name))
                exit(1)
            if True:
                #add entry to mod_appkernel.resource
                dbAK,curAK=cfg.getAKDB(True)
                    
                curAK.execute('''SELECT * FROM app_kernel_def WHERE ak_base_name=%s''', (app_name,))
                ak_in_AKDB = curAK.fetchall()
                if len(ak_in_AKDB)==0:
                    curAK.execute('''INSERT INTO app_kernel_def (name,ak_base_name,processor_unit,enabled, description, visible)
                                VALUES(%s,%s,'node',0,%s,0);''',
                                (app_name,app_name,app_name))
                    dbAK.commit()
                curAK.execute('''UPDATE app_kernel_def SET enabled=1,visible=1  WHERE ak_base_name=%s''', (app_name,))
                dbAK.commit()
                #add entry to mod_akrr.resource
                db,cur=cfg.getDB(True)
                    
                cur.execute('''SELECT * FROM app_kernels WHERE name=%s''', (app_name,))
                ak_in_DB = cur.fetchall()
                if len(ak_in_DB)==0:
                    cur.execute('''INSERT INTO app_kernels (id,name,enabled,nodes_list)
                                VALUES(%s,%s,0,'1,2,4,8');''',
                                (ak_in_AKDB['ak_def_id'],app_name))
                    db.commit()
                cur.execute('''UPDATE app_kernels SET enabled=1  WHERE name=%s''', (app_name,))
                db.commit()
        except:
            log.exception("Can not turn-on %s on %s",app_name,resource_name)
            exit(1)
        
    if errorCount > 0:
        log.error("There are %d errors, fix them.",errorCount)
    if warningCount > 0:
        log.info("\nThere are %d warnings.\nIf the warnings make sense (highlighted in yellow), you can move to the next step!\n"%warningCount,highlight="warning")
    if errorCount == 0 and warningCount == 0:
        log.info("\nDONE, you can move to the next step!\n",highlight="ok")
    os.remove(test_job_lock_filename)
Example #52
def main():
    a_args = ('-a', '--alias',)
    a_kwargs = {
        'help': 'Alias for a version of the function',
        'default': None
    }
    c_args = ('-c', '--conf-file',)
    c_kwargs = {
        'help': 'Configuration YAML file (default: .lamvery.yml)',
        'default': '.lamvery.yml'
    }
    d_args = ('-d', '--dry-run',)
    d_kwargs = {
        'help': 'Dry run',
        'action': 'store_true',
        'default': False
    }
    k_args = ('-k', '--keep-empty-events',)
    k_kwargs = {
        'help': 'Keep the event rules that do not have any targets.',
        'action': 'store_true',
        'default': False
    }
    sf_args = ('-s', '--single-file',)
    sf_kwargs = {
        'help': 'Only use the main lambda function file',
        'action': 'store_true',
        'default': False
    }
    l_args = ('-l', '--no-libs',)
    l_kwargs = {
        'help': 'Archiving without all libraries',
        'action': 'store_true',
        'default': False
    }
    n_args = ('-n', '--secret-name',)
    n_kwargs = {
        'help': 'The name of the secret value',
        'default': None
    }
    p_args = ('-p', '--publish')
    p_kwargs = {
        'help': 'Publish the version as an atomic operation',
        'action': 'store_true',
        'default': False
    }
    s_args = ('-s', '--store',)
    s_kwargs = {
        'help': 'Store the encrypted value in the configuration file (default: .lamvery.yml)',
        'action': 'store_true',
        'default': False
    }
    v_args = ('-v', '--version',)
    v_kwargs = {
        'help': 'Version of the function',
        'default': None
    }

    parser = argparse.ArgumentParser(
        description='Yet another deploy tool for AWS Lambda in the virtualenv environment.',
        epilog='Lamvery version: {}'.format(lamvery.__version__))
    subparsers = parser.add_subparsers(title='subcommands')

    init_parser = subparsers.add_parser(
        'init',
        help='Generate initial configuration file')
    init_parser.add_argument(*c_args, **c_kwargs)
    init_parser.set_defaults(func=init)

    archive_parser = subparsers.add_parser(
        'archive',
        help='Archive your code and libraries to <your-function-name>.zip')
    archive_parser.add_argument(*c_args, **c_kwargs)
    archive_parser.add_argument(*sf_args, **sf_kwargs)
    archive_parser.add_argument(*l_args, **l_kwargs)
    archive_parser.set_defaults(func=archive)

    set_alias_parser = subparsers.add_parser(
        'set-alias',
        help='Set alias to a version of the function')
    set_alias_parser.add_argument(*a_args, **a_kwargs)
    set_alias_parser.add_argument(*c_args, **c_kwargs)
    set_alias_parser.add_argument(*d_args, **d_kwargs)
    set_alias_parser.add_argument(*v_args, **v_kwargs)
    set_alias_parser.set_defaults(func=set_alias)

    configure_parser = subparsers.add_parser(
        'configure',
        help='Update the remote configuration')
    configure_parser.add_argument(*c_args, **c_kwargs)
    configure_parser.add_argument(*d_args, **d_kwargs)
    configure_parser.set_defaults(func=configure)

    deploy_parser = subparsers.add_parser(
        'deploy',
        help='Deploy your code and libraries, ' +
             'update the remote configuration, and set an alias (optional)')
    deploy_parser.add_argument(*a_args, **a_kwargs)
    deploy_parser.add_argument(*c_args, **c_kwargs)
    deploy_parser.add_argument(*d_args, **d_kwargs)
    deploy_parser.add_argument(*sf_args, **sf_kwargs)
    deploy_parser.add_argument(*l_args, **l_kwargs)
    deploy_parser.add_argument(*p_args, **p_kwargs)
    deploy_parser.set_defaults(func=deploy)

    encrypt_parser = subparsers.add_parser('encrypt', help='Encrypt a text value using KMS')
    encrypt_parser.add_argument('text', help='The text to be encrypted')
    encrypt_parser.add_argument(*c_args, **c_kwargs)
    encrypt_parser.add_argument(*n_args, **n_kwargs)
    encrypt_parser.add_argument(*s_args, **s_kwargs)
    encrypt_parser.set_defaults(func=encrypt)

    decrypt_parser = subparsers.add_parser('decrypt', help='Decrypt the secret value using KMS')
    decrypt_parser.add_argument(*c_args, **c_kwargs)
    decrypt_parser.add_argument(*n_args, **n_kwargs)
    decrypt_parser.set_defaults(func=decrypt)

    events_parser = subparsers.add_parser(
        'events',
        help='Configure all events of CloudWatchEvents using the function')
    events_parser.add_argument(*c_args, **c_kwargs)
    events_parser.add_argument(*d_args, **d_kwargs)
    events_parser.add_argument(*k_args, **k_kwargs)
    events_parser.set_defaults(func=events)

    invoke_parser = subparsers.add_parser(
        'invoke',
        help='Invoke the function')
    invoke_parser.add_argument(
        'json', default='{}', help='The JSON string or file that pass to the function')
    invoke_parser.add_argument(*a_args, **a_kwargs)
    invoke_parser.add_argument(*c_args, **c_kwargs)
    invoke_parser.add_argument(*v_args, **v_kwargs)
    invoke_parser.set_defaults(func=invoke)

    rollback_parser = subparsers.add_parser(
        'rollback',
        help='Rollback your code and libraries')
    rollback_parser.add_argument(*a_args, **a_kwargs)
    rollback_parser.add_argument(*c_args, **c_kwargs)
    rollback_parser.add_argument(*v_args, **v_kwargs)
    rollback_parser.set_defaults(func=rollback)

    try:
        args = parser.parse_args()
        args.func(args)
        sys.exit(0)
    except Exception as e:
        msg = str(e)
        logging.exception(msg)
        sys.exit(colored(msg, 'red'))
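Example #52 avoids repeating flag definitions by storing each (args, kwargs) pair once and splatting it into every subparser that needs it. A condensed, runnable sketch of that pattern; the flag and subcommand names here are illustrative:

import argparse

# Define each shared flag once as an (args, kwargs) pair...
conf_args = ('-c', '--conf-file')
conf_kwargs = {'help': 'Configuration YAML file', 'default': '.lamvery.yml'}

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(title='subcommands')
for name in ('deploy', 'rollback'):
    sub = subparsers.add_parser(name)
    # ...then splat it into every subparser that needs the flag.
    sub.add_argument(*conf_args, **conf_kwargs)

args = parser.parse_args(['deploy'])
print(args.conf_file)  # -> .lamvery.yml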
Example #53
    def algorithm(self, *args, **kwargs):
        """
        _algorithm_

        Split files into a number of lumis per job
        Allow a flag to determine if we split files between jobs
        """

        avgEventsPerJob = int(kwargs.get('events_per_job', 5000))
        jobLimit = int(kwargs.get('job_limit', 0))
        eventLimit = int(kwargs.get('max_events_per_lumi', 20000))
        totalEvents = int(kwargs.get('total_events', 0))
        splitOnFile = bool(kwargs.get('halt_job_on_file_boundaries', False))
        self.collectionName = kwargs.get('collectionName', None)
        splitOnRun = kwargs.get('splitOnRun', True)
        getParents = kwargs.get('include_parents', False)
        runWhitelist = kwargs.get('runWhitelist', [])
        runs = kwargs.get('runs', None)
        lumis = kwargs.get('lumis', None)
        applyLumiCorrection = bool(kwargs.get('applyLumiCorrection', False))
        deterministicPileup = kwargs.get('deterministicPileup', False)

        timePerEvent, sizePerEvent, memoryRequirement = \
            self.getPerformanceParameters(kwargs.get('performance', {}))

        eventsPerLumiInDataset = 0

        if self.package == 'WMCore.WMBS':
            self.loadRunLumi = self.daoFactory(classname="Files.GetBulkRunLumi")
            if deterministicPileup:
                getJobNumber = self.daoFactory(classname="Jobs.GetNumberOfJobsPerWorkflow")
                self.nJobs = getJobNumber.execute(workflow=self.subscription.getWorkflow().id)

        goodRunList = {}
        if runs and lumis:
            goodRunList = buildLumiMask(runs, lumis)

        # If we have runLumi info, we need to load it from couch
        if self.collectionName:
            try:
                from WMCore.ACDC.DataCollectionService import DataCollectionService
                couchURL = kwargs.get('couchURL')
                couchDB = kwargs.get('couchDB')
                filesetName = kwargs.get('filesetName')

                logging.info('Creating jobs for ACDC fileset %s', filesetName)
                dcs = DataCollectionService(couchURL, couchDB)
                goodRunList = dcs.getLumiWhitelist(self.collectionName, filesetName)
            except Exception as ex:
                msg = "Exception while trying to load goodRunList. "
                msg += "Refusing to create any jobs.\nDetails: %s" % str(ex)
                logging.exception(msg)
                return

        lDict = self.getFilesSortedByLocation(avgEventsPerJob)
        if not lDict:
            logging.info("There are not enough events/files to be splitted. Trying again next cycle")
            return


        locationDict = {}
        for key in lDict.keys():
            newlist = []
            # First we need to load the data
            if self.loadRunLumi:
                fileLumis = self.loadRunLumi.execute(files=lDict[key])
                for f in lDict[key]:
                    lumiDict = fileLumis.get(f['id'], {})
                    for run in lumiDict.keys():
                        f.addRun(run=Run(run, *lumiDict[run]))

            for f in lDict[key]:
                if len(f['runs']) == 0:
                    continue
                f['runs'] = sorted(f['runs'])
                f['lumiCount'] = 0
                for run in f['runs']:
                    run.lumis.sort()
                    f['lumiCount'] += len(run.lumis)
                f['lowestRun'] = f['runs'][0]

                # Do average event per lumi calculation
                if f['lumiCount']:
                    f['avgEvtsPerLumi'] = round(float(f['events']) / f['lumiCount'])
                    if deterministicPileup:
                        # We assume that all lumis are equal in the dataset
                        eventsPerLumiInDataset = f['avgEvtsPerLumi']
                else:
                    # No lumis in the file, ignore it
                    continue
                newlist.append(f)

            locationDict[key] = sorted(newlist, key=operator.itemgetter('lowestRun'))

        totalJobs = 0
        lastLumi = None
        firstLumi = None
        lastRun = None
        lumisInJob = 0
        totalAvgEventCount = 0
        currentJobAvgEventCount = 0
        stopTask = False
        self.lumiChecker = LumiChecker(applyLumiCorrection)
        for location in locationDict:

            # For each location, we need a new jobGroup
            self.newGroup()
            stopJob = True
            for f in locationDict[location]:

                if getParents:
                    parentLFNs = self.findParent(lfn=f['lfn'])
                    for lfn in parentLFNs:
                        parent = File(lfn=lfn)
                        f['parents'].add(parent)

                lumisInJobInFile = 0
                updateSplitOnJobStop = False
                failNextJob = False
                # If the number of events per lumi is higher than the limit
                # and it's only one lumi then ditch that lumi
                if f['avgEvtsPerLumi'] > eventLimit and f['lumiCount'] == 1:
                    failNextJob = True
                    stopJob = True
                    lumisPerJob = 1
                elif splitOnFile:
                    # Then we have to split on every boundary
                    stopJob = True
                    # Check the average number of events per lumi in this file
                    # Adapt the lumis per job to match the target conditions
                    if f['avgEvtsPerLumi']:
                        # If there are events in the file
                        ratio = float(avgEventsPerJob) / f['avgEvtsPerLumi']
                        lumisPerJob = max(int(math.floor(ratio)), 1)
                    else:
                        # Zero event file, then the ratio goes to infinity. Computers don't like that
                        lumisPerJob = f['lumiCount']
                else:
                    # Analyze how many events does this job already has
                    # Check how many we want as target, include as many lumi sections as possible
                    updateSplitOnJobStop = True
                    eventsRemaining = max(avgEventsPerJob - currentJobAvgEventCount, 0)
                    if f['avgEvtsPerLumi']:
                        lumisAllowed = int(math.floor(float(eventsRemaining) / f['avgEvtsPerLumi']))
                    else:
                        lumisAllowed = f['lumiCount']
                    lumisPerJob = max(lumisInJob + lumisAllowed, 1)

                for run in f['runs']:
                    if not isGoodRun(goodRunList=goodRunList, run=run.run):
                        # Then skip this one
                        continue
                    if len(runWhitelist) > 0 and run.run not in runWhitelist:
                        # Skip due to run whitelist
                        continue
                    firstLumi = None

                    if splitOnRun and run.run != lastRun:
                        # Then we need to kill this job and get a new one
                        stopJob = True

                    # Now loop over the lumis
                    for lumi in run:
                        if (not isGoodLumi(goodRunList, run=run.run, lumi=lumi) or
                                self.lumiChecker.isSplitLumi(run.run, lumi, f)):
                            # Kill the chain of good lumis
                            # Skip this lumi
                            if firstLumi is not None and firstLumi != lumi:
                                self.currentJob['mask'].addRunAndLumis(run=run.run,
                                                                       lumis=[firstLumi, lastLumi])
                                eventsAdded = ((lastLumi - firstLumi + 1) * f['avgEvtsPerLumi'])
                                runAddedTime = eventsAdded * timePerEvent
                                runAddedSize = eventsAdded * sizePerEvent
                                self.currentJob.addResourceEstimates(jobTime=runAddedTime, disk=runAddedSize)
                                firstLumi = None
                                lastLumi = None
                            continue

                        # You have to kill the lumi chain if they're not continuous
                        if lastLumi and lumi != lastLumi + 1:
                            self.currentJob['mask'].addRunAndLumis(run=run.run,
                                                                   lumis=[firstLumi, lastLumi])
                            eventsAdded = ((lastLumi - firstLumi + 1) * f['avgEvtsPerLumi'])
                            runAddedTime = eventsAdded * timePerEvent
                            runAddedSize = eventsAdded * sizePerEvent
                            self.currentJob.addResourceEstimates(jobTime=runAddedTime, disk=runAddedSize)
                            firstLumi = None
                            lastLumi = None

                        if firstLumi is None:
                            # Set the first lumi in the run
                            firstLumi = lumi

                        # If we're full, end the job
                        if lumisInJob == lumisPerJob:
                            stopJob = True
                        # Actually do the new job creation
                        if stopJob:
                            if firstLumi is not None and lastLumi is not None and lastRun is not None:
                                self.currentJob['mask'].addRunAndLumis(run=lastRun,
                                                                       lumis=[firstLumi, lastLumi])
                                eventsAdded = ((lastLumi - firstLumi + 1) * f['avgEvtsPerLumi'])
                                runAddedTime = eventsAdded * timePerEvent
                                runAddedSize = eventsAdded * sizePerEvent
                                self.currentJob.addResourceEstimates(jobTime=runAddedTime, disk=runAddedSize)
                            msg = None
                            if failNextJob:
                                msg = "File %s has too many events (%d) in %d lumi(s)" % (f['lfn'],
                                                                                          f['events'],
                                                                                          f['lumiCount'])
                            self.lumiChecker.closeJob(self.currentJob)
                            self.newJob(name=self.getJobName(), failedJob=failNextJob, failedReason=msg)
                            if deterministicPileup:
                                skipEvents = (self.nJobs - 1) * lumisPerJob * eventsPerLumiInDataset
                                self.currentJob.addBaggageParameter("skipPileupEvents", skipEvents)
                            self.currentJob.addResourceEstimates(memory=memoryRequirement)
                            failNextJob = False
                            firstLumi = lumi
                            lumisInJob = 0
                            lumisInJobInFile = 0
                            currentJobAvgEventCount = 0
                            totalJobs += 1
                            if jobLimit and totalJobs > jobLimit:
                                msg = "Job limit of {0} jobs exceeded.".format(jobLimit)
                                raise RuntimeError(msg)

                            # Add the file to new jobs
                            self.currentJob.addFile(f)

                            if updateSplitOnJobStop:
                                # Then we were carrying from a previous file
                                # Reset calculations for this file
                                updateSplitOnJobStop = False
                                if f['avgEvtsPerLumi']:
                                    ratio = float(avgEventsPerJob) / f['avgEvtsPerLumi']
                                    lumisPerJob = max(int(math.floor(ratio)), 1)
                                else:
                                    lumisPerJob = f['lumiCount']

                        lumisInJob += 1
                        lumisInJobInFile += 1
                        lastLumi = lumi
                        stopJob = False
                        lastRun = run.run
                        totalAvgEventCount += f['avgEvtsPerLumi']

                        if self.currentJob and f not in self.currentJob['input_files']:
                            self.currentJob.addFile(f)

                        # We stop here if there are more total events than requested.
                        if totalEvents > 0 and totalAvgEventCount >= totalEvents:
                            stopTask = True
                            break

                    if firstLumi is not None and lastLumi is not None:
                        # Add this run to the mask
                        self.currentJob['mask'].addRunAndLumis(run=run.run,
                                                               lumis=[firstLumi, lastLumi])
                        eventsAdded = ((lastLumi - firstLumi + 1) * f['avgEvtsPerLumi'])
                        runAddedTime = eventsAdded * timePerEvent
                        runAddedSize = eventsAdded * sizePerEvent
                        self.currentJob.addResourceEstimates(jobTime=runAddedTime, disk=runAddedSize)
                        firstLumi = None
                        lastLumi = None

                    if stopTask:
                        break

                if not splitOnFile:
                    currentJobAvgEventCount += f['avgEvtsPerLumi'] * lumisInJobInFile

                if stopTask:
                    break

            if stopTask:
                break

        self.lumiChecker.closeJob(self.currentJob)
        self.lumiChecker.fixInputFiles()
        return
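The lumisPerJob recalculation in the updateSplitOnJobStop branch above is the carry-over logic: when a job boundary falls inside a new file, the target lumi count is rederived from that file's own events-per-lumi average. A minimal, self-contained sketch of the same arithmetic follows; the function and parameter names are illustrative, not part of the original module.

import math

def lumis_per_job(avg_events_per_job, avg_events_per_lumi, lumi_count):
    # When the file reports an events-per-lumi average, divide the target
    # job size by it, rounding down but never below one lumi per job.
    if avg_events_per_lumi:
        ratio = float(avg_events_per_job) / avg_events_per_lumi
        return max(int(math.floor(ratio)), 1)
    # Without an average, fall back to the file's whole lumi count.
    return lumi_count

With avg_events_per_job=100000 and avg_events_per_lumi=300, for instance, this yields floor(100000 / 300) = 333 lumi sections per job.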
Example #54
0
def gui_index():
    info = make_info_dict()
    try:
        return render_template('gui/dashboard.html', **info)
    except Exception as e:
        logging.exception(e)
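The render_template call and template path mark gui_index as a Flask view. A minimal wiring sketch, assuming a standard Flask app; the app name and the root route are assumptions, and make_info_dict is the helper already referenced above.

from flask import Flask

app = Flask(__name__)

# Attach the existing view function without a decorator; the '/' path
# is an assumed mount point, not taken from the original project.
app.add_url_rule('/', 'gui_index', gui_index)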
Example #55
0
def clear_all_keys(self):
    self.bootstrap_storage()
    try:
        return self.storage.clear_all_keys()
    except Exception:
        logging.exception("Unable to clear all keys")
Example #56
0
    def dbs(self, **params):
        logging.debug(params)
        messages = []
        data = []
        preset_configs = []
        metrics_list = []
        active_dbnames = []
        preset_configs_json = {}

        if params:
            try:
                if params.get('save'):
                    messages += pgwatch2.update_monitored_db(params, cmd_args)
                elif params.get('new'):
                    messages += pgwatch2.insert_monitored_db(params, cmd_args)
                elif params.get('delete'):
                    pgwatch2.delete_monitored_db(params)
                    messages.append('Entry with ID {} ("{}") deleted!'.format(
                        params['md_id'], params['md_unique_name']))
                elif params.get('delete_single'):
                    if not params['single_unique_name']:
                        raise Exception('No "Unique Name" provided!')
                    if cmd_args.datastore == 'influx':
                        pgwatch2_influx.delete_influx_data_single(
                            params['single_unique_name'])
                    else:
                        pgwatch2.delete_postgres_metrics_data_single(
                            params['single_unique_name'])
                    messages.append('Data for "{}" deleted!'.format(
                        params['single_unique_name']))
                elif params.get('delete_all'):
                    active_dbs = pgwatch2.get_active_db_uniques()
                    if cmd_args.datastore == 'influx':
                        deleted_dbnames = pgwatch2_influx.delete_influx_data_all(
                            active_dbs)
                    else:
                        deleted_dbnames = pgwatch2.delete_postgres_metrics_for_all_inactive_hosts(
                            active_dbs)
                    messages.append('Data deleted for: {}'.format(
                        ','.join(deleted_dbnames)))
                elif params.get('disable_all'):
                    affected = pgwatch2.disable_all_dbs()
                    messages.append(
                        '{} DBs disabled. It will take some minutes for this to become effective'
                        .format(affected))
                elif params.get('enable_all'):
                    affected = pgwatch2.enable_all_dbs()
                    messages.append('{} DBs enabled'.format(affected))
                elif params.get('set_bulk_config'):
                    affected = pgwatch2.set_bulk_config(params)
                    messages.append(
                        "'{}' preset set as config for {} DBs. It will take some minutes for this to become effective"
                        .format(params.get('bulk_preset_config_name'),
                                affected))
                elif params.get('set_bulk_timeout'):
                    affected = pgwatch2.set_bulk_timeout(params)
                    messages.append("Timeout set for {} DBs".format(affected))
                elif params.get('set_bulk_password'):
                    err, affected = pgwatch2.set_bulk_password(
                        params, cmd_args)
                    if err:
                        messages.append(err)
                    else:
                        messages.append(
                            "Password updated for {} DBs".format(affected))
            except Exception as e:
                logging.exception('Changing DBs failed')
                messages.append('ERROR: ' + str(e))

        try:
            if cmd_args.datastore == 'influx':
                active_dbnames = pgwatch2_influx.get_active_dbnames()
            else:
                active_dbnames = pgwatch2.get_all_dbnames()
        except Exception as e:
            logging.exception('ERROR getting DB listing from metrics DB')
            messages.append('ERROR getting DB listing from metrics DB: ' +
                            str(e))

        try:
            data = pgwatch2.get_all_monitored_dbs()
            preset_configs = pgwatch2.get_preset_configs()
            preset_configs_json = json.dumps(
                {c['pc_name']: c['pc_config']
                 for c in preset_configs})
            metrics_list = pgwatch2.get_active_metrics_with_versions()
        except psycopg2.OperationalError:
            messages.append('ERROR: Could not connect to Postgres')
        except Exception as e:
            messages.append('ERROR: ' + str(e))

        tmpl = env.get_template('dbs.html')
        return tmpl.render(messages=messages,
                           data=data,
                           preset_configs=preset_configs,
                           preset_configs_json=preset_configs_json,
                           metrics_list=metrics_list,
                           active_dbnames=active_dbnames,
                           no_anonymous_access=cmd_args.no_anonymous_access,
                           session=cherrypy.session,
                           no_component_logs=cmd_args.no_component_logs,
                           aes_gcm_enabled=cmd_args.aes_gcm_keyphrase,
                           datastore=cmd_args.datastore)
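Since dbs() renders a Jinja2 template and passes cherrypy.session through to it, it is presumably exposed as a CherryPy page handler. A minimal mounting sketch under that assumption; the class name and configuration are illustrative.

import cherrypy

class Frontend(object):

    @cherrypy.expose
    def dbs(self, **params):
        ...  # body as in Example #56

if __name__ == '__main__':
    # Sessions must be enabled, because the template receives cherrypy.session.
    cherrypy.quickstart(Frontend(), '/', {'/': {'tools.sessions.on': True}})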
Example #57
0
def save(self, key, value, expire=None):
    self.bootstrap_storage()
    try:
        return self.storage.save(key, pickle.dumps(value), expire=expire)
    except Exception:
        logging.exception("Unable to save %s", key)
Example #58
0
def size(self):
    self.bootstrap_storage()
    try:
        return self.storage.size()
    except Exception:
        logging.exception("Failed to get the size of our storage")
Example #59
0
def handle_500(_, response, exception):
  """ Handles 500, error processing page exceptions. """
  logging.exception(exception)
  response.set_status(500)
  response.write(jinja_environment.get_template('500.html').render())
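The (request, response, exception) signature matches webapp2's error-handler convention, so registration would look like the sketch below; the application object and its (empty) route list are assumptions.

import webapp2

app = webapp2.WSGIApplication(routes=[], debug=False)
# Route every uncaught 500 through the handler above.
app.error_handlers[500] = handle_500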
Example #60
0
def clear(self, key):
    self.bootstrap_storage()
    try:
        return self.storage.clear(key)
    except Exception:
        logging.exception("Unable to clear %s", key)