Example #1
	def setDevice(self):
		h = headers
		h.update({
			'Authorization': 'OAuth="' + self.oauth + '"',
			'Content-Length': '342',
			'Content-Type': 'application/json'})
		payload = {
			"app_build": "18.0.11",
			"country_id": "US",
			"gps_adid": "05596566-c7c7-4bc7-a6c9-729715c9ad98",
			"idfa": "f550c51fa242216c",
			"language_id": "en",
			"os_version": 19,
			"token": "APA91bE3axREMeqEpvjkIOWyCBWRO1c4Zm69nyH5f5a7o9iRitRq96ergzyrRfYK5hsDa_-8J35ar7zi5AZFxVeA6xfpK77_kCVRqFmbayGuYy7Uppy_krXIaTAe8Vdd7oUoXJBA7q2vVnZ6hj9afmju9C3vMKz-KA",
			"type": "android"
		}
		url = 'https://api.happn.fr/api/users/' + self.id + '/devices/1830658762'
		r = requests.put(url, headers=h, data=json.dumps(payload))

		# Check status of device set
		if r.status_code == 200:
			logging.info('Device Set')
		else:
			# Device set denied by server
			logging.warning('Server denied request for device set change: %d', r.status_code)
Example #2
    def contour(self, data):
        """
        Overlay a contour-plot

        @param data: 2darray with the 2theta values in radians...
        """
        if self.fig is None:
            logging.warning("No diffraction image available => not showing the contour")
        else:
            while len(self.msp.images) > 1:
                self.msp.images.pop()
            while len(self.ct.images) > 1:
                self.ct.images.pop()
            while len(self.ct.collections) > 0:
                self.ct.collections.pop()

            if self.points.dSpacing and  self.points._wavelength:
                angles = list(2.0 * numpy.arcsin(5e9 * self.points._wavelength / numpy.array(self.points.dSpacing)))
            else:
                angles = None
            try:
                xlim, ylim = self.ax.get_xlim(), self.ax.get_ylim()
                self.ct.contour(data, levels=angles)
                self.ax.set_xlim(xlim)
                self.ax.set_ylim(ylim)
                print("Visually check that the curve overlays with the Debye-Scherrer rings of the image")
                print("Check also for correct indexing of rings")
            except MemoryError:
                logging.error("Sorry but your computer does NOT have enough memory to display the 2-theta contour plot")
            self.fig.show()
Example #3
    def Adjustment(self):
        """ adjustment & and blunder removing

            :returns: adjusted coordinates or None
        """
        # adjustment loop
        last_res = None
        while True:
            res, blunder = self.g.adjust()
            if res is None or 'east' not in res[0] or 'north' not in res[0] or \
                              'elev' not in res[0]:
                # adjustment failed or too many blunders
                if last_res is not None:
                    logging.warning("blunders are not fully removed")
                    res = last_res
                else:
                    logging.error("adjustment failed")
                break
            elif blunder['std-residual'] < 1.0:
                logging.info("blunders removed")
                break
            else:
                logging.info("%s - %s observation removed" % (blunder['from'], blunder['to']))
                self.g.remove_observation(blunder['from'], blunder['to'])
                last_res = res
        return res
Example #4
	def getDistance(self, userID):
		""" Gets the distance from the sybil """
		h = {
			'http.useragent':   'Happn/1.0 AndroidSDK/0',
			'Authorization':    'OAuth="' + self.oauth + '"',
			'Content-Type':     'application/json',
			'User-Agent':       'Dalvik/1.6.0 (Linux; U; Android 4.4.2; SCH-I535 Build/KOT49H)',
			'Host':             'api.happn.fr',
			'Connection':       'Keep-Alive',
			'Accept-Encoding':  'gzip'
		}
		query = '?query=%7B%22fields%22%3A%22about%2Cis_accepted%2Cage%2Cjob%2Cworkplace%2Cmodification_date%2Cprofiles.mode%281%29.width%28720%29.height%281280%29.fields%28url%2Cwidth%2Cheight%2Cmode%29%2Clast_meet_position%2Cmy_relation%2Cis_charmed%2Cdistance%2Cgender%2Cmy_conversation%22%7D'
		url = 'https://api.happn.fr/api/users/' + userID + query

		try:
			r = requests.get(url, headers=h)
		except requests.exceptions.RequestException:
			logging.warning('Error creating connection to Happn server for distance query')
			return False

		if r.status_code == 200:
			# Successfully got distance
			self.distance = r.json()['data']['distance']
			logging.info('Sybil %d m from target', self.distance)
		else:
			logging.warning('Server denied request for user distance: %d', r.status_code)
			self.distance = -1
			return False
Example #5
  def __ReadPickled(self, filename):
    """Reads a pickled object from the given file and returns it.
    """
    self.__file_lock.acquire()

    try:
      try:
        if (filename and
            filename != '/dev/null' and
            os.path.isfile(filename) and
            os.stat(filename).st_size > 0):
          return pickle.load(open(filename, 'rb'))
        else:
          logging.warning('Could not read datastore data from %s', filename)
      except (AttributeError, LookupError, ImportError, NameError, TypeError,
              ValueError, struct.error, pickle.PickleError), e:


        raise apiproxy_errors.ApplicationError(
            datastore_pb.Error.INTERNAL_ERROR,
            'Could not read data from %s. Try running with the '
            '--clear_datastore flag. Cause:\n%r' % (filename, e))
    finally:
      self.__file_lock.release()

    return []
Example #6
  def testFramesFromMp4(self):
    host_platform = platform.GetHostPlatform()

    try:
      host_platform.InstallApplication('avconv')
    finally:
      if not host_platform.CanLaunchApplication('avconv'):
        logging.warning('Test not supported on this platform')
        return  # pylint: disable=W0150

    vid = os.path.join(util.GetUnittestDataDir(), 'vid.mp4')
    expected_timestamps = [
      0,
      763,
      783,
      940,
      1715,
      1732,
      1842,
      1926,
      ]

    video_obj = video.Video(vid)

    # Calling _FramesFromMp4 should return all frames.
    # pylint: disable=W0212
    for i, timestamp_bitmap in enumerate(video_obj._FramesFromMp4(vid)):
      timestamp, bmp = timestamp_bitmap
      self.assertEquals(timestamp, expected_timestamps[i])
      expected_bitmap = image_util.FromPngFile(os.path.join(
          util.GetUnittestDataDir(), 'frame%d.png' % i))
      self.assertTrue(image_util.AreEqual(expected_bitmap, bmp))
Example #7
def RefreshManifestCheckout(manifest_dir, manifest_repo):
  """Checks out manifest-versions into the manifest directory.

  If a repository is already present, it will be cleansed of any local
  changes and restored to its pristine state, checking out the origin.
  """
  reinitialize = True
  if os.path.exists(manifest_dir):
    result = git.RunGit(manifest_dir, ['config', 'remote.origin.url'],
                        error_code_ok=True)
    if (result.returncode == 0 and
        result.output.rstrip() == manifest_repo):
      logging.info('Updating manifest-versions checkout.')
      try:
        git.RunGit(manifest_dir, ['gc', '--auto'])
        git.CleanAndCheckoutUpstream(manifest_dir)
      except cros_build_lib.RunCommandError:
        logging.warning('Could not update manifest-versions checkout.')
      else:
        reinitialize = False
  else:
    logging.info('No manifest-versions checkout exists at %s', manifest_dir)

  if reinitialize:
    logging.info('Cloning fresh manifest-versions checkout.')
    osutils.RmDir(manifest_dir, ignore_missing=True)
    repository.CloneGitRepo(manifest_dir, manifest_repo)
Example #8
    def initialize(self, io_loop=None, max_clients=10,
                   max_simultaneous_connections=None):
        self.io_loop = io_loop
        self._multi = pycurl.CurlMulti()
        self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout)
        self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket)
        self._curls = [_curl_create(max_simultaneous_connections)
                       for i in xrange(max_clients)]
        self._free_list = self._curls[:]
        self._requests = collections.deque()
        self._fds = {}
        self._timeout = None

        try:
            self._socket_action = self._multi.socket_action
        except AttributeError:
            # socket_action is found in pycurl since 7.18.2 (it's been
            # in libcurl longer than that but wasn't accessible to
            # python).
            logging.warning("socket_action method missing from pycurl; "
                            "falling back to socket_all. Upgrading "
                            "libcurl and pycurl will improve performance")
            self._socket_action = \
                lambda fd, action: self._multi.socket_all()

        # libcurl has bugs that sometimes cause it to not report all
        # relevant file descriptors and timeouts to TIMERFUNCTION/
        # SOCKETFUNCTION.  Mitigate the effects of such bugs by
        # forcing a periodic scan of all active requests.
        self._force_timeout_callback = ioloop.PeriodicCallback(
            self._handle_force_timeout, 1000, io_loop=io_loop)
        self._force_timeout_callback.start()
Example #9
def getFieldInfo(fieldnum):
    """Returns a dictionary containing the metadata of a K2 Campaign field.

    Raises a ValueError if the field number is unknown.

    Parameters
    ----------
    fieldnum : int
        Campaign field number (e.g. 0, 1, 2, ...)

    Returns
    -------
    field : dict
        The dictionary contains the keys
        'ra', 'dec', 'roll' (floats in decimal degrees),
        'start', 'stop', (strings in YYYY-MM-DD format)
        and 'comments' (free text).
    """
    try:
        from . import CAMPAIGN_DICT_FILE

        CAMPAIGN_DICT = json.load(open(CAMPAIGN_DICT_FILE))
        info = CAMPAIGN_DICT["c{0}".format(fieldnum)]
        # Print warning messages if necessary
        if fieldnum == 100:
            logging.warning("You are using the K2 first light field, " "you almost certainly do not want to do this")
        elif "preliminary" in info and info["preliminary"] == "True":
            logging.warning(
                "The field you are searching is not yet fixed "
                "and is only the proposed position. "
                "Do not use this position for target selection."
            )
        return info
    except KeyError:
        raise ValueError("Field {0} not set in this version " "of the code".format(fieldnum))
Example #10
    def _sanitize(self, badKey, badVal):
        valid = True

        # Used for debugging
        if 'csv_line' not in self:
            self['csv_line'] = "-1"

        # Catch bad formatting
        if badKey in self:
            logging.debug("%s %s", badKey, ''.join(self[badKey]))
            logging.debug("Bad Key")
            valid = False

        if 'last_pymnt_d' in self and re.match(r"^\s*$", self['last_pymnt_d']):
            if 'issue_d' in self:
                # If no payment received, last payment date = issue date
                self['last_pymnt_d'] = self['issue_d']

        for k, v in self.items():
            if badVal == v:
                logging.debug(badVal)
                valid = False
                break

            # Replace empties with 0s
            if re.match(r'^\s*$', str(v)):
                self[k] = 0

        if not valid:
            logging.debug(self.items())
            # Can't safely access specific keys, other than id, when incorrectly formatted
            logging.warning("Fix Loan {}".format(self['id']))
            logging.warning("Line {}".format(self['csv_line']))

        return valid
Example #11
 def _count(self, tstmp):
     self.ended = tstmp
     self.counting += 1
     if self.counting >= self.window:
         if not self.is_triggered:
             logging.warning("%s", self)
         self.is_triggered = True
Example #12
	def get_question_by_id(self, sitename, id):
		"""For debug purposes, this function gets a question by its id and saves it"""
		con = self.connect_to_mysql()
		se_api_url ="http://api.stackexchange.com/2.2/questions/{0}/?site={1}&filter={2}".format(id, sitename, self.se_getquestions_filter)

		print "calling {0}".format(se_api_url)
		r = requests.get(se_api_url)
		if (r.status_code == 200):
			se_posts = json.loads(r.text)
			print "found se_post"

			for post in se_posts['items']:
				if 'question_id' in post:
					q = self.process_post(post)
					q.display_state()
					for ans in q.answers:
						ans.display_state()

					self.save_posts(con, q)
				else:
					logging.warning("Malformed post - no json['question_id']!\n{0}".format(post))
		else:
			logging.warning("Http status_code {1} reading posts from Stack Exchange at {0}".format(se_api_url, r.status_code))

		if con.open:
			con.close()
Example #13
    def call(self, function, params=None):
        self.requestPerMinute += 1
        now = datetime.utcnow()

        if self.requestPerMinute >= self.requestLimit:
            waittime = 60 - now.second
            logging.warning("Limit for request per minute exceeded. Waiting for: {0} sec.".format(waittime))
            time.sleep(waittime)
            now = datetime.utcnow()

        if self.checkMinute != now.minute:
            self.requestPerMinute = 0
            self.checkMinute = now.minute

        payload = ''
        try:
            p = "" if not params else '?' + "&".join(
                ["{key}={value}".format(key=k, value=v) for (k, v) in params.iteritems()])
            url = "{base}.{func}{params}".format(base=self.baseConfig["url"], func=function, params=p)
            logging.debug("{0} {1} API call:{2}".format(self.checkMinute, self.requestPerMinute, url))
            request = urllib2.Request(url, None, self.baseConfig["headers"])
            stream = urllib2.urlopen(request)
            payload = stream.read()
            data = json.loads(payload)
            if isinstance(data, dict) and 'ruid' in data:
                logging.error('Api call failed with error: {0} Code: {1}'.format(data['message'], data['code']))
                return None
            return data

        except Exception as e:
            logging.error('Error: {0} Context: {1}'.format(e, payload))
            return None
Example #14
def track_call(api_action, api_details, x_tba_app_id):
    analytics_id = Sitevar.get_by_id("google_analytics.id")
    if analytics_id is None:
        logging.warning("Missing sitevar: google_analytics.id. Can't track API usage.")
    else:
        GOOGLE_ANALYTICS_ID = analytics_id.contents['GOOGLE_ANALYTICS_ID']
        params = urllib.urlencode({
            'v': 1,
            'tid': GOOGLE_ANALYTICS_ID,
            'cid': uuid.uuid3(uuid.NAMESPACE_X500, str(x_tba_app_id)),
            't': 'event',
            'ec': 'api',
            'ea': api_action,
            'el': api_details,
            'cd1': x_tba_app_id,  # custom dimension 1
            'ni': 1,
            'sc': 'end',  # forces tracking session to end
        })

        # Sets up the call
        analytics_url = 'http://www.google-analytics.com/collect?%s' % params
        urlfetch.fetch(
            url=analytics_url,
            method=urlfetch.GET,
            deadline=10,
        )
Example #15
def recursive_flush(handle, path):
  """ Recursively deletes the path and the value of the children of the given
  node.

  Args:
    handle: A Zookeeper client handler.
    path: The Zookeeper path to delete.
  """
  try:
    children = handle.get_children(path)
    if not any(path.startswith(item) for item in ZK_IGNORE_PATHS):
      logging.debug("Processing path: {0}".format(path))
      for child in children:
        logging.debug("Processing child: {0}".format(child))
        new_path = '{0}{1}'.format(path, child)
        if path != ZK_TOP_LEVEL:
          new_path = PATH_SEPARATOR.join([path, child])
        recursive_flush(handle, new_path)
      try:
        handle.delete(path)
      except kazoo.exceptions.BadArgumentsError:
        logging.warning('BadArgumentsError while deleting path: {0}.'.format(
          path))
      except kazoo.exceptions.NotEmptyError:
        logging.warning('NotEmptyError while deleting path: {0}. Skipping..'.
          format(path))
  except kazoo.exceptions.NoNodeError:
    logging.debug('Reached the end of the zookeeper path.')
Example #16
    def _compare(src_list, dst_list, src_remote, dst_remote, file):
        """Return True if src_list[file] matches dst_list[file], else False"""
        attribs_match = True
        if not (src_list.has_key(file) and dst_list.has_key(file)):
            info(u"%s: does not exist in one side or the other: src_list=%s, dst_list=%s" % (file, src_list.has_key(file), dst_list.has_key(file)))
            return False

        ## check size first
        if 'size' in cfg.sync_checks and dst_list[file]['size'] != src_list[file]['size']:
            debug(u"xfer: %s (size mismatch: src=%s dst=%s)" % (file, src_list[file]['size'], dst_list[file]['size']))
            attribs_match = False

        ## check md5
        compare_md5 = 'md5' in cfg.sync_checks
        # Multipart-uploaded files don't have a valid md5 sum - it ends with "...-nn"
        if compare_md5:
            if (src_remote == True and src_list[file]['md5'].find("-") >= 0) or (dst_remote == True and dst_list[file]['md5'].find("-") >= 0):
                compare_md5 = False
                info(u"disabled md5 check for %s" % file)
        if attribs_match and compare_md5:
            try:
                src_md5 = src_list.get_md5(file)
                dst_md5 = dst_list.get_md5(file)
            except (IOError,OSError), e:
                # md5 sum verification failed - ignore that file altogether
                debug(u"IGNR: %s (disappeared)" % (file))
                warning(u"%s: file disappeared, ignoring." % (file))
                raise

            if src_md5 != dst_md5:
                ## checksums are different.
                attribs_match = False
                debug(u"XFER: %s (md5 mismatch: src=%s dst=%s)" % (file, src_md5, dst_md5))
Example #17
    def copy_if_hash_differs(vm, local_path, remote_path):
        """
        Copy a file to a guest if it doesn't exist or if its MD5sum differs.

        :param vm: VM object.
        :param local_path: Local path.
        :param remote_path: Remote path.

        :return: Whether the hash differs (True) or not (False).
        """
        hash_differs = False
        local_hash = utils.hash_file(local_path)
        basename = os.path.basename(local_path)
        output = session.cmd_output("md5sum %s" % remote_path)
        if "such file" in output:
            remote_hash = "0"
        elif output:
            remote_hash = output.split()[0]
        else:
            logging.warning("MD5 check for remote path %s did not return.",
                            remote_path)
            # Let's be a little more lenient here and see if it wasn't a
            # temporary problem
            remote_hash = "0"
        if remote_hash != local_hash:
            hash_differs = True
            logging.debug("Copying %s to guest "
                          "(remote hash: %s, local hash:%s)",
                          basename, remote_hash, local_hash)
            vm.copy_files_to(local_path, remote_path)
        return hash_differs
Example #18
    def vars2png(self, wrfout_path, dom_id, ts_esmf, vars):
        """
        Postprocess a list of scalar fields into KMZ files.

        :param wrfout_path: WRF file to process
        :param dom_id: the domain identifier
        :param ts_esmf: time stamp in ESMF format
        :param vars: list of variables to process
        """
        # open the netCDF dataset
        d = nc4.Dataset(wrfout_path)

        # extract ESMF string times and identify timestamp of interest
        times = [''.join(x) for x in d.variables['Times'][:]]
        if ts_esmf not in times:
            raise PostprocError("Invalid timestamp %s" % ts_esmf)
        tndx = times.index(ts_esmf)

        # build one KMZ per variable
        for var in vars:
            try:
                outpath_base = os.path.join(self.output_path, self.product_name + ("-%02d-" % dom_id) + ts_esmf + "-" + var) 
                if var in ['WINDVEC']:
                    raster_path, coords = self._vector2png(d, var, tndx, outpath_base)
                    raster_name = osp.basename(raster_path)
                    self._update_manifest(dom_id, ts_esmf, var, { 'raster' : raster_name, 'coords' : coords})
                else:
                    raster_path, cb_path, coords = self._scalar2png(d, var, tndx, outpath_base)
                    mf_upd = { 'raster' : osp.basename(raster_path), 'coords' : coords}
                    if cb_path is not None:
                        mf_upd['colorbar'] = osp.basename(cb_path)
                    self._update_manifest(dom_id, ts_esmf, var, mf_upd)
            except Exception as e:
                logging.warning("Exception %s while postprocessing %s for time %s into PNG" % (e.message, var, ts_esmf))
                logging.warning(traceback.print_exc())
Example #19
def _fswalk_follow_symlinks(path):
    '''
    Walk filesystem, following symbolic links (but without recursion), on python2.4 and later

    If a symlink directory loop is detected, emit a warning and skip.
    E.g.: dir1/dir2/sym-dir -> ../dir2
    '''
    assert os.path.isdir(path) # only designed for directory argument
    walkdirs = set([path])
    for dirpath, dirnames, filenames in os.walk(path):
        handle_exclude_include_walk(dirpath, dirnames, [])
        real_dirpath = os.path.realpath(dirpath)
        for dirname in dirnames:
            current = os.path.join(dirpath, dirname)
            real_current = os.path.realpath(current)
            if os.path.islink(current):
                if (real_dirpath == real_current or
                    real_dirpath.startswith(real_current + os.path.sep)):
                    warning("Skipping recursively symlinked directory %s" % dirname)
                else:
                    walkdirs.add(current)
    for walkdir in walkdirs:
        for dirpath, dirnames, filenames in os.walk(walkdir):
            handle_exclude_include_walk(dirpath, dirnames, [])
            yield (dirpath, dirnames, filenames)
Example #20
def find_workflow_figure(page, name, prefix=None, retries=5):
    """ Return :class:`WorkflowFigure` for `name`. """
    root = page.root or page.browser
    for retry in range(retries):
        time.sleep(0.5)  # Pause for stable display.
        figures = root.find_elements_by_class_name('WorkflowFigure')
        if not figures:
            continue
        driver_name = None
        for figure in figures:
            page.browser.implicitly_wait(1)
            try:
                children = figure.find_elements_by_xpath('./*')
                driver_fig = children[0]
                driver_name = driver_fig.find_element_by_css_selector('svg text').text
            except StaleElementReferenceException:
                logging.warning('get_workflow_figure:'
                                ' StaleElementReferenceException')
            else:
                if driver_name == name:
                    fig = WorkflowFigure(page.browser, page.port, figure)
                    if prefix is not None:
                        if prefix:
                            fig.pathname = '%s.%s' % (prefix, name)
                        else:
                            fig.pathname = name
                    return fig
            finally:
                page.browser.implicitly_wait(TMO)
    return None
Example #21
    def vars2kmz(self, wrfout_path, dom_id, ts_esmf, vars):
        """
        Postprocess a list of scalar fields at a given simulation time into KMZ files.

        :param wrfout_path: WRF file to process
        :param dom_id: the domain identifier
        :param ts_esmf: time stamp in ESMF format
        :param vars: list of variables to process
        """
        # open the netCDF dataset
        d = nc4.Dataset(wrfout_path)

        # extract ESMF string times and identify timestamp of interest
        times = [''.join(x) for x in d.variables['Times'][:]]
        if ts_esmf not in times:
            raise PostprocError("Invalid timestamp %s" % ts_esmf)
        tndx = times.index(ts_esmf)

        # build one KMZ per variable
        for var in vars:
            try:
                outpath_base = os.path.join(self.output_path, self.product_name + ("-%02d-" % dom_id) + ts_esmf + "-" + var) 
                kmz_path = None
                if var in ['WINDVEC']:
                    kmz_path,_,_ = self._vector2kmz(d, var, tndx, outpath_base)
                else:
                    kmz_path,_,_,_ = self._scalar2kmz(d, var, tndx, outpath_base)
                kmz_name = osp.basename(kmz_path)
                self._update_manifest(dom_id, ts_esmf, var, { 'kml' : kmz_name })


            except Exception as e:
                logging.warning("Exception %s while postprocessing %s for time %s into KMZ" % (e.message, var, ts_esmf))
                logging.warning(traceback.print_exc())
Example #22
    def get_video_size(media_urls):
        '''Determine the resolution of the video

        Arguments: [media_urls]

        Return value: (width, height)
        '''
        try:
            if media_urls[0].startswith('http:') or media_urls[0].startswith('https:'):
                ffprobe_command = ['ffprobe', '-icy', '0', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '-timeout', '60000000', '-user-agent', USER_AGENT_PLAYER, '--', media_urls[0]]
            else:
                ffprobe_command = ['ffprobe', '-loglevel', 'repeat+warning' if verbose else 'repeat+error', '-print_format', 'json', '-select_streams', 'v', '-show_streams', '--', media_urls[0]]
            log_command(ffprobe_command)
            ffprobe_process = subprocess.Popen(ffprobe_command, stdout=subprocess.PIPE)
            try:
                ffprobe_output = json.loads(ffprobe_process.communicate()[0].decode('utf-8', 'replace'))
            except KeyboardInterrupt:
                logging.warning('Cancelling getting video size, press Ctrl-C again to terminate.')
                ffprobe_process.terminate()
                return 0, 0
            width, height, widthxheight = 0, 0, 0
            for stream in dict.get(ffprobe_output, 'streams') or []:
                if dict.get(stream, 'width')*dict.get(stream, 'height') > widthxheight:
                    width, height = dict.get(stream, 'width'), dict.get(stream, 'height')
            return width, height
        except Exception as e:
            log_or_raise(e, debug=debug)
            return 0, 0
Example #23
    def notify_channel_of_changes(self):
        if self.confluence_feed_url:
            logging.info('Looking for Confluence updates.')
            feed_entries = check_feed(self.confluence_feed_url, self.username, self.password)
            confluence_updates = []
            new_date = self.last_date
            for entry in feed_entries:
                published = parser.parse(entry.published)
                if published > self.last_date:
                    # Append the update to the list if more recent than
                    # last reported date
                    confluence_updates.append(feed_entry_string(entry))
                    # track most recent date
                    if published > new_date:
                        new_date = published
            # Update most recent seen timestamp in save file
            if new_date > self.last_date:
                save_time(self.time_save_file, new_date)
                self.last_date = new_date
            for update in confluence_updates:
                logging.debug('Confluence update: %s' % update)
                self.bot.speak(update)
        else:
            logging.info('Skipping Confluence feed checks.')

        # start a new timer if existing timer wasn't cancelled,
        # which may have happened while we were polling repos.
        if self.timer and self.fetch_interval:
            self.timer = threading.Timer(self.fetch_interval, self.notify_channel_of_changes)
            self.timer.start()
        else:
            logging.warning('Not setting a new feed monitor timer.')
Example #24
def stop_dcvoip_process():
    try:
        logger.info("Stopping dcvoip process")
        with open(os.devnull, 'wb') as devnull:
            subprocess.check_call(["sudo", "service", "dcvoip", "stop"], stdout=devnull)
    except (subprocess.CalledProcessError, IOError):
        logging.warning("Unable to stop the dcvoip process")    
Example #25
    def setup(self, bot, options):
        self.bot = bot
        if options.has_key('confluence_feed_url'):
            self.confluence_feed_url = options['confluence_feed_url']
            logging.info('Monitoring Confluence feed %s' % self.confluence_feed_url)
        if options.has_key('fetch_interval'):
            try:
                fetch_interval = int(options['fetch_interval'])
                self.fetch_interval = fetch_interval
                logging.info('Checking at %d second intervals.' % self.fetch_interval)
            except (ValueError, TypeError):
                pass
        if options.has_key('username'):
            self.username = options['username']
        if options.has_key('password'):
            self.password = options['password']

        self.last_date = load_time(self.time_save_file)

        if not self.timer and self.fetch_interval:
            logging.info('Setting feed monitor timer to %d seconds.' % self.fetch_interval)
            self.timer = threading.Timer(self.fetch_interval, self.notify_channel_of_changes)
            self.timer.start()
        else:
            logging.warning('Not setting a feed monitor timer.')
Example #26
File: cli.py Project: pabloxxl/revi
 def performExternalEnter(self):
     if self.browser is None:
         lg.warning("Tried to open external link without browser set!")
     else:
         link = self.currObject.getLink(self.currObject.currLine)
         os.system("%s %s" % (self.browser, link.url))
         self.setMsg("Link loaded externaly")
Example #27
def start_dcvoip_process():
    try:
        logger.info("Starting dcvoip process - Thanks Jonas Karlsson!")
        with open(os.devnull, 'wb') as devnull:    
            subprocess.check_call(["sudo", "service", "dcvoip", "start"], stdout=devnull)    
    except (subprocess.CalledProcessError, IOError):
        logging.warning("Unable to start the dcvoip process")
Example #28
    def _on_request_token_more(self, authorize_url, callback_uri, response, app_name):
        """
        :param authorize_url:
        :param callback_uri:
        :param response:
        :return:
        """
        if not response:
            logging.warning('Could not get OAuth request token.')
            abort(500)
        elif response.status_code < 200 or response.status_code >= 300:
            logging.warning('Invalid OAuth response (%d): %s',
                response.status_code, response.content)
            abort(500)

        request_token = _oauth_parse_responsex(response.content)
        data = '|'.join([request_token['key'], request_token['secret']])
        self.set_cookie('_oauth_request_token', data)
        args = dict(
            application_name=self._OAUTH_APP_NAME,
            oauth_callback=self.request.url,
            oauth_consumer_key=self._netflix_consumer_key(),
            oauth_token=request_token['key']
            )
        if callback_uri:
            args['oauth_callback'] = urlparse.urljoin(
                self.request.url, callback_uri)

        return redirect(authorize_url + '?' + urllib.urlencode(args))
Example #29
	def getOAuth(self):
		""" Gets the OAuth tokens using Happn's API """
		h=headers
		# Update OAuth specific headers
		h.update({'Content-Type':'application/x-www-form-urlencoded; charset=UTF-8'})
		h.update({'Content-Length': '439'})

		payload = {
			'client_id' : client_id,
			'client_secret' : client_secret,
			'grant_type' : 'assertion',
			'assertion_type' : 'facebook_access_token',
			'assertion' : self.fbtoken,
			'scope':'mobile_app'
		}
		url = 'https://api.happn.fr/connect/oauth/token'
		try:
			r = requests.post(url,headers=h,data=payload)
		except requests.exceptions.RequestException:
			logging.warning('Error creating connection to Happn server')
			# Without auth token cant use sybil
			return False

		# Check response validity
		if r.status_code == 200:
			# Successfully got oauth token
			self.id = r.json()['user_id']
			logging.info('Fetched Happn OAuth token: %s', r.json()['access_token'])
			return r.json()['access_token']
		else:
			# Error code returned from server (but server was accessible)
			logging.warning('Server denied request for OAuth token. Status: %d', r.status_code)
Example #30
  def RunBinaries(self, timeout=60):
    """Run binaries contained in an application package.

    Args:
      timeout: The maximum duration that each binary should run.
    """

    # Add leading slash to package binary paths, since they are recorded as
    # paths relative to the package extract directory.
    package_binaries = set('/%s' % s for s in FileAnalyzer.GetBinaries())
    excluded_binaries = self._GetExcludedBinaries()
    dev_null = open(os.devnull, 'w')
    for binary in package_binaries - excluded_binaries:
      # Exclude .so files.
      if binary.endswith('.so'):
        continue
      # Launch in a timed subprocess, since this may take arbitrarily long
      # otherwise.
      subproc = TimedSubprocess(timeout)
      if subproc.Popen(BinaryLauncher._MakeBinaryCommand(binary, True),
                       stdout=dev_null, stderr=dev_null):
        if subproc.Wait():
          logging.warning('Binary %s terminated with errors.', binary)
      else:
        logging.warning('Could not start binary %s.', binary)
    dev_null.close()
Example #31
def run():
    is_main_run_factor = 5
    #filenames=['toy10','toy50','emotions','medical','enron','yeast','scene','cal500','fp','cancer']
    #filenames=['cancer']
    filenames = ['toy10', 'toy50', 'emotions', 'yeast', 'scene', 'fp']
    n = 0
    # generate job_queue
    logging.info('\t\tGenerating priority queue.')
    for newton_method in ['1', '0']:
        for filename in filenames:
            for slack_c in ['1', '10', '0.1']:
                for t in [1, 5, 10, 20, 30]:
                    para_t = "%d" % (t)
                    graph_type = 'tree'
                    for kappa in [
                            '1', '2', '3', '4', '5', '6', '8', '10', '12',
                            '14', '16'
                    ]:
                        for l_norm in ['2']:
                            #for kth_fold in ['1','2','3','4','5']:
                            for kth_fold in ['1']:
                                for loss_scaling_factor in ['0.1', '1']:
                                    if checkfile(filename, graph_type, para_t,
                                                 kth_fold, l_norm, kappa,
                                                 slack_c, loss_scaling_factor,
                                                 newton_method):
                                        continue
                                    else:
                                        n = n + 1
                                        job_queue.put(
                                            (n, (filename, graph_type, para_t,
                                                 kth_fold, l_norm, kappa,
                                                 slack_c, loss_scaling_factor,
                                                 newton_method)))
                                    pass  # for newton_method
                                pass  # for loss_scaling_factor
                            pass  # for slack_c
                        pass  # for |T|
                    pass  # for l
                pass  # for kappa
            pass  # for datasets
        pass  # for k fole
    # get computing nodes
    cluster = get_free_nodes(
    )[0]  # if you have access to some interactive computer cluster, get the list of hostnames of the cluster
    #cluster = ['melkinkari'] # if you don't have access to any computer cluster, just use your machine as the only computing node
    # running job_queue
    job_size = job_queue.qsize()
    logging.info("\t\tProcessing %d job_queue" % (job_size))
    threads = []
    for i in range(len(cluster)):
        if job_queue.empty():
            break
        t = Worker(job_queue, cluster[i])
        time.sleep(is_main_run_factor)
        try:
            t.start()
            threads.append(t)
        except ThreadError:
            logging.warning("\t\tError: thread error caught!")
        pass
    for t in threads:
        t.join()
        pass
    pass  # def
Example #32
    def _create_batch_asr(self, x_feats_dict, y_feats_dict, uttid_list):
        """Create a OrderedDict for the mini-batch

        :param OrderedDict x_feats_dict:
            e.g. {"input1": [ndarray, ndarray, ...],
                  "input2": [ndarray, ndarray, ...]}
        :param OrderedDict y_feats_dict:
            e.g. {"target1": [ndarray, ndarray, ...],
                  "target2": [ndarray, ndarray, ...]}
        :param: List[str] uttid_list:
            Give uttid_list to sort in the same order as the mini-batch
        :return: batch, uttid_list
        :rtype: Tuple[OrderedDict, List[str]]
        """
        # handle single-input and multi-input (parallel) asr mode
        xs = list(x_feats_dict.values())

        if self.load_output:
            if len(y_feats_dict) == 1:
                ys = list(y_feats_dict.values())[0]
                assert len(xs[0]) == len(ys), (len(xs[0]), len(ys))

                # get index of non-zero length samples
                nonzero_idx = list(filter(lambda i: len(ys[i]) > 0, range(len(ys))))
            elif len(y_feats_dict) > 1:  # multi-speaker asr mode
                ys = list(y_feats_dict.values())
                assert len(xs[0]) == len(ys[0]), (len(xs[0]), len(ys[0]))

                # get index of non-zero length samples
                nonzero_idx = list(filter(lambda i: len(ys[0][i]) > 0, range(len(ys[0]))))
                for n in range(1, len(y_feats_dict)):
                    nonzero_idx = list(filter(lambda i: len(ys[n][i]) > 0, nonzero_idx))
        else:
            # Note(kamo): Be careful not to make nonzero_idx to a generator
            nonzero_idx = list(range(len(xs[0])))

        if self.sort_in_input_length:
            # sort in input lengths based on the first input
            nonzero_sorted_idx = sorted(nonzero_idx, key=lambda i: -len(xs[0][i]))
        else:
            nonzero_sorted_idx = nonzero_idx

        if len(nonzero_sorted_idx) != len(xs[0]):
            logging.warning(
                'Target sequences include empty tokenid (batch {} -> {}).'
                .format(len(xs[0]), len(nonzero_sorted_idx)))

        # remove zero-length samples
        xs = [[x[i] for i in nonzero_sorted_idx] for x in xs]
        uttid_list = [uttid_list[i] for i in nonzero_sorted_idx]

        x_names = list(x_feats_dict.keys())
        if self.load_output:
            if len(y_feats_dict) == 1:
                ys = [ys[i] for i in nonzero_sorted_idx]
            elif len(y_feats_dict) > 1:  # multi-speaker asr mode
                ys = zip(*[[y[i] for i in nonzero_sorted_idx] for y in ys])

            y_name = list(y_feats_dict.keys())[0]

            # Keeping x_name and y_name, e.g. input1, for future extension
            return_batch = OrderedDict([*[(x_name, x) for x_name, x in zip(x_names, xs)], (y_name, ys)])
        else:
            return_batch = OrderedDict([(x_name, x) for x_name, x in zip(x_names, xs)])
        return return_batch, uttid_list
Example #33
#Logging Levels


#   DEBUG      logging.debug()        #The lowest level. Used for the small details. Usually for diagnosing problems
#   INFO       logging.info()         #Used to record info on general events in your program or confirm things are working at certain points
#   WARNING    logging.warning()      #Used to indicate a potential problem that may cause a problem in the future
#   ERROR      logging.error()        #Used to record an error that caused the program to fail to do something
#   CRITICAL   logging.critical()     #The highest level. Used to indicate a fatal error that caused the program to stop entirely.

#Its up to the programmer to decide when and where to use these

import logging
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
logging.debug('Some bugging detail')
logging.info('Some bugging info')
logging.warning('Some bug warning')
logging.error('Some bug error')
logging.critical('Something critical')

#by changing level=somelevel in the .basicConfig statement you can choose which levels to ignore
    #level=logging.WARNING, will only show WARNING, ERROR and CRITICAL.
        #It will skip DEBUG and INFO

###Disabling Logging
#logging.disable() will disable all logging functions
    #pass a logging level through logging.disable(HERE) to disable at that level and lower
        #logging.disable(logging.CRITICAL) will disable all logging

import logging
logging.basicConfig(level=logging.INFO, format=' %(asctime)s - %(levelname)s - %(message)s')
logging.critical('Critical Error! Critical Error')
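
#A minimal sketch of the logging.disable() behavior described above
#(the messages are made up purely for illustration):

import logging
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s - %(message)s')
logging.disable(logging.ERROR)          #suppresses ERROR and every level below it
logging.error('This error is suppressed')
logging.critical('Only CRITICAL still gets through')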
Example #34
USE_S3 = (AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
          and AWS_STORAGE_BUCKET_NAME and AWS_STORAGE_REGION)

if USE_S3:
    AWS_S3_CUSTOM_DOMAIN = env.str("AWS_S3_CUSTOM_DOMAIN", "")
    AWS_S3_OBJECT_PARAMETERS = {"CacheControl": "max-age=86400"}
    AWS_DEFAULT_ACL = env.str("AWS_DEFAULT_ACL", "public-read")
    AWS_MEDIA_LOCATION = env.str("AWS_MEDIA_LOCATION", "media")
    AWS_AUTO_CREATE_BUCKET = env.bool("AWS_AUTO_CREATE_BUCKET", True)
    DEFAULT_FILE_STORAGE = env.str("DEFAULT_FILE_STORAGE",
                                   "home.storage_backends.MediaStorage")
    MEDIA_URL = "/mediafiles/"
    MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")

# start fcm_django push notifications
FCM_DJANGO_SETTINGS = {"FCM_SERVER_KEY": env.str("FCM_SERVER_KEY", "")}
# end fcm_django push notifications

# Swagger settings for api docs
SWAGGER_SETTINGS = {
    "DEFAULT_INFO": f"{ROOT_URLCONF}.api_info",
}

if DEBUG or not (EMAIL_HOST_USER and EMAIL_HOST_PASSWORD):
    # output email to console instead of sending
    if not DEBUG:
        logging.warning(
            "You should setup `SENDGRID_USERNAME` and `SENDGRID_PASSWORD` env vars to send emails."
        )
    EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
Example #35
#####################################################
# find+apply shift w.r.t. first image
if args.shift:
    ref_cat = all_images[0].cat
    # keep only point sources
    print(ref_cat)
    for image in all_images[1:]:
        # cross match
        idx_match, sep, _ = match_coordinates_sky(SkyCoord(ref_cat['RA'], ref_cat['DEC']),\
                                             SkyCoord(image.cat['RA'], image.cat['DEC']))
        idx_matched_ref = np.arange(0,len(ref_cat))[sep<target_beam[0]*u.degree]
        idx_matched_img = idx_match[sep<target_beam[0]*u.degree]

        # find & apply shift
        if len(idx_match) < 3:
            logging.warning('%s: Not enough matches found, assume no shift.' % image.imagefile)
            continue
            
        dra = ref_cat['RA'][idx_matched_ref] - image.cat['RA'][idx_matched_img]
        dra[ dra>180 ] -= 360
        dra[ dra<-180 ] += 360
        ddec = ref_cat['DEC'][idx_matched_ref] - image.cat['DEC'][idx_matched_img]
        flux = ref_cat['Peak_flux'][idx_matched_ref]
        image.apply_shift(np.average(dra, weights=flux), np.average(ddec, weights=flux))

    # clean up
    #for image in all_images:
    #    os.system(rm ...)

 
Example #36
import logging

logging.basicConfig(filename='abc.log',
                    level=logging.INFO,
                    format='%(asctime)s %(levelname)s: %(message)s',
                    datefmt='%Y-%m-%d %H:%M:%S')
a = "defined"
logging.debug("This is debug")
logging.info('This is info')
logging.warning('This is warning')
logging.error('This is error {}'.format(a))
Example #37
    if not plan_targets:
        connect.await_user_input(
            "The target list is empty." +
            " Please apply type PTV to the targets and continue.")
        for r in case.PatientModel.RegionsOfInterest:
            if r.OrganData.OrganType == 'Target':
                plan_targets.append(r.Name)
    if not plan_targets:
        status.finish('Script cancelled, targets were not supplied')
        sys.exit('Script cancelled')
    # TODO: Add handling for empty targets or geometries

    # Look for the sim point, if not create a point
    sim_point_found = any(poi.Name == 'SimFiducials' for poi in pois)
    if sim_point_found:
        logging.warning("POI SimFiducials Exists")
        status.next_step(
            text="SimFiducials Point found, ensure that it is placed properly")
        connect.await_user_input(
            'Ensure Correct placement of the SimFiducials Point and continue script.'
        )
    else:
        case.PatientModel.CreatePoi(Examination=examination,
                                    Point={
                                        'x': 0,
                                        'y': 0,
                                        'z': 0
                                    },
                                    Volume=0,
                                    Name="SimFiducials",
                                    Color="Green",
Example #38
 async def remove(self):
     args = [self.getValue(self.__primary_key__)]
     rows = await execute(self.__delete__, args)
     if rows != 1:
         logging.warning('failed to remove by primary key: affected rows: %s' % rows)
Example #39
 async def save(self):
     args = list(map(self.getValueOrDefault, self.__fields__))
     args.append(self.getValueOrDefault(self.__primary_key__))
     rows = await execute(self.__insert__, args)
     if rows !=1:
         logging.warning('failed to insert record: affected rows: %s' % rows)
Example #40
    def _Simulate(self, test_data_generators, clean_capture_input_filepath,
                  render_input_filepath, test_data_cache_path,
                  echo_test_data_cache_path, output_path, config_filepath,
                  echo_path_simulator):
        """Runs a single set of simulation.

    Simulates a given combination of APM configuration, probing signal, and
    test data generator. It iterates over the test data generator
    internal configurations.

    Args:
      test_data_generators: TestDataGenerator instance.
      clean_capture_input_filepath: capture input audio track file to be
                                    processed by a test data generator and
                                    not affected by echo.
      render_input_filepath: render input audio track file to test.
      test_data_cache_path: path for the generated test audio track files.
      echo_test_data_cache_path: path for the echo simulator.
      output_path: base output path for the test data generator.
      config_filepath: APM configuration file to test.
      echo_path_simulator: EchoPathSimulator instance.
    """
        # Generate pairs of noisy input and reference signal files.
        test_data_generators.Generate(
            input_signal_filepath=clean_capture_input_filepath,
            test_data_cache_path=test_data_cache_path,
            base_output_path=output_path)

        # Extract metadata linked to the clean input file (if any).
        apm_input_metadata = None
        try:
            apm_input_metadata = data_access.Metadata.LoadFileMetadata(
                clean_capture_input_filepath)
        except IOError as e:
            apm_input_metadata = {}
        apm_input_metadata['test_data_gen_name'] = test_data_generators.NAME
        apm_input_metadata['test_data_gen_config'] = None

        # For each test data pair, simulate a call and evaluate.
        for config_name in test_data_generators.config_names:
            logging.info(' - test data generator config: <%s>', config_name)
            apm_input_metadata['test_data_gen_config'] = config_name

            # Paths to the test data generator output.
            # Note that the reference signal does not depend on the render input
            # which is optional.
            noisy_capture_input_filepath = (
                test_data_generators.noisy_signal_filepaths[config_name])
            reference_signal_filepath = (
                test_data_generators.reference_signal_filepaths[config_name])

            # Output path for the evaluation (e.g., APM output file).
            evaluation_output_path = test_data_generators.apm_output_paths[
                config_name]

            # Paths to the APM input signals.
            echo_path_filepath = echo_path_simulator.Simulate(
                echo_test_data_cache_path)
            apm_input_filepath = input_mixer.ApmInputMixer.Mix(
                echo_test_data_cache_path, noisy_capture_input_filepath,
                echo_path_filepath)

            # Simulate a call using APM.
            self._audioproc_wrapper.Run(
                config_filepath=config_filepath,
                capture_input_filepath=apm_input_filepath,
                render_input_filepath=render_input_filepath,
                output_path=evaluation_output_path)

            try:
                # Evaluate.
                self._evaluator.Run(
                    evaluation_score_workers=self._evaluation_score_workers,
                    apm_input_metadata=apm_input_metadata,
                    apm_output_filepath=self._audioproc_wrapper.
                    output_filepath,
                    reference_input_filepath=reference_signal_filepath,
                    output_path=evaluation_output_path)

                # Save simulation metadata.
                data_access.Metadata.SaveAudioTestDataPaths(
                    output_path=evaluation_output_path,
                    clean_capture_input_filepath=clean_capture_input_filepath,
                    echo_free_capture_filepath=noisy_capture_input_filepath,
                    echo_filepath=echo_path_filepath,
                    render_filepath=render_input_filepath,
                    capture_filepath=apm_input_filepath,
                    apm_output_filepath=self._audioproc_wrapper.
                    output_filepath,
                    apm_reference_filepath=reference_signal_filepath)
            except exceptions.EvaluationScoreException as e:
                logging.warning('the evaluation failed: %s', e.message)
                continue
Example #41
    def reset(self):
        """
        Reset energy meter and start sampling from channels specified in the
        target configuration.
        """
        # Terminate already running iio-capture instance (if any)
        wait_for_termination = 0
        for proc in psutil.process_iter():
            if self._iiocapturebin not in proc.cmdline():
                continue
            for channel in self._channels:
                if self._iio_device(channel) in proc.cmdline():
                    logging.debug('%14s - Killing previous iio-capture for [%s]',
                                 'ACME', self._iio_device(channel))
                    logging.debug('%14s - %s', 'ACME', proc.cmdline())
                    proc.kill()
                    wait_for_termination = 2

        # Wait for previous instances to be killed
        sleep(wait_for_termination)

        # Start iio-capture for all channels required
        for channel in self._channels:
            ch_id = self._channels[channel]

            # Setup CSV file to collect samples for this channel
            csv_file = '{}/{}'.format(
                self._res_dir,
                'samples_{}.csv'.format(channel)
            )

            # Start a dedicated iio-capture instance for this channel
            self._iio[ch_id] = Popen([self._iiocapturebin, '-n',
                                       self._hostname, '-o',
                                       '-c', '-f',
                                       csv_file,
                                       self._iio_device(channel)],
                                       stdout=PIPE, stderr=STDOUT)

        # Wait briefly before checking whether there is any output
        sleep(1)

        # Check that all required channels have been started
        for channel in self._channels:
            ch_id = self._channels[channel]

            self._iio[ch_id].poll()
            if self._iio[ch_id].returncode:
                logging.error('%14s - Failed to run %s for %s', 'ACME',
                              self._iiocapturebin, self._str(channel))
                logging.warning('\n\n'\
                    '  Make sure there are no iio-capture processes\n'\
                    '  connected to %s and device %s\n',
                    self._hostname, self._str(channel))
                out, _ = self._iio[ch_id].communicate()
                logging.error('%14s - Output: [%s]', 'ACME', out.strip())
                self._iio[ch_id] = None
                raise RuntimeError('iio-capture connection error')

        logging.debug('%14s - Started %s on %s...', 'ACME',
                      self._iiocapturebin, self._str(channel))
Example #42
 async def update(self):
     args = list(map(self.getValue, self.__fields__))
     args.append(self.getValue(self.__primary_key__))
     rows = await execute(self.__update__, args)
     if rows != 1:
         logging.warning('failed to update by primary key: affected rows: %s' % rows)
Example #43
def main(pkt_type, ipaddr, portnum, raw, mac_seqnum, panid, dstshortaddr,
         srcshortaddr, srcextendedaddr, pancoord, assocpermit, devdepth, epid,
         updateid, nwk_seqnum, devtype, powsrc, rxidle):
    """Inject a forged packet."""
    # Sanity checks
    if mac_seqnum < 0 or mac_seqnum > 255:
        raise ValueError("Invalid MAC sequence number")
    elif pancoord not in {0, 1}:
        raise ValueError("Invalid PAN Coordinator field value")
    elif assocpermit not in {0, 1}:
        raise ValueError("Invalid Association Permit field value")
    elif devdepth < 0 or devdepth > 15:
        raise ValueError("Invalid Device Depth field value")
    elif updateid < 0 or updateid > 255:
        raise ValueError("Invalid Update ID field value")
    elif nwk_seqnum < 0 or nwk_seqnum > 255:
        raise ValueError("Invalid NWK sequence number")
    elif devtype not in {0, 1}:
        raise ValueError("Invalid Device Type field value")
    elif powsrc not in {0, 1}:
        raise ValueError("Invalid Power Source field value")
    elif rxidle not in {0, 1}:
        raise ValueError("Invalid Receiver On When Idle field value")

    # Forge a packet based on the provided parameter values
    if pkt_type.lower() == "mpdu":
        # Process some of the provided parameter values
        if raw is None:
            raw = DEFAULT_RAW
            logging.warning("Unspecified raw bytes; defaulted "
                            "to \"{}\"".format(raw))
        # Forge the packet
        forged_pkt = bytes.fromhex(raw)
        # Sanity check
        if len(forged_pkt) < 1 or len(forged_pkt) > 127:
            raise ValueError("Invalid MPDU length")
        # Print a disclaimer
        print("############################################################")
        print("#                        DISCLAIMER                        #")
        print("#                                                          #")
        print("# The injection of a forged packet may interfere with the  #")
        print("# operation of legitimate IEEE 802.15.4-based networks.    #")
        print("# The users of this tool are responsible for making sure   #")
        print("# that they are compliant with their local laws and that   #")
        print("# they have proper permission from the affected network    #")
        print("# owners.                                                  #")
        print("############################################################")
        answer = input("Are you sure that you want to proceed? [y/N] ")
        # Check the provided answer
        if answer == "y":
            print("You accepted responsibility for your actions")
        else:
            logging.info("Canceling the injection of a forged packet...")
            return
    elif pkt_type.lower() == "beacon":
        # Process some of the provided parameter values
        if panid is None:
            panid = DEFAULT_PANID
            logging.warning("Unspecified PAN ID; defaulted "
                            "to \"0x{:04x}\"".format(panid))
        else:
            panid = int(panid, 16)
            if panid < 0 or panid.bit_length() > 16:
                raise ValueError("Invalid PAN ID")
        if srcshortaddr is None:
            srcshortaddr = DEFAULT_SRCSHORTADDR
            logging.warning("Unspecified short source address; "
                            "defaulted to \"0x{:04x}\"".format(srcshortaddr))
        else:
            srcshortaddr = int(srcshortaddr, 16)
            if srcshortaddr < 0 or srcshortaddr.bit_length() > 16:
                raise ValueError("Invalid short source address")
        if epid is None:
            epid = DEFAULT_EPID
            logging.warning("Unspecified EPID; defaulted "
                            "to \"{:016x}\"".format(epid))
        else:
            epid = int(epid, 16)
            if epid < 0 or epid.bit_length() > 64:
                raise ValueError("Invalid EPID")
        # Forge the packet
        forged_pkt = beacon(mac_seqnum, panid, srcshortaddr, pancoord,
                            assocpermit, devdepth, epid, updateid)
        # Print a disclaimer
        print("############################################################")
        print("#                        DISCLAIMER                        #")
        print("#                                                          #")
        print("# The injection of a forged beacon may interfere with the  #")
        print("# operation of legitimate IEEE 802.15.4-based networks.    #")
        print("# The users of this tool are responsible for making sure   #")
        print("# that they are compliant with their local laws and that   #")
        print("# they have proper permission from the affected network    #")
        print("# owners.                                                  #")
        print("############################################################")
        answer = input("Are you sure that you want to proceed? [y/N] ")
        # Check the provided answer
        if answer == "y":
            print("You accepted responsibility for your actions")
        else:
            logging.info("Canceling the injection of a forged packet...")
            return
    elif pkt_type.lower() == "beaconreq":
        # Forge the packet
        forged_pkt = beaconreq(mac_seqnum)
        # Print a disclaimer
        print("############################################################")
        print("#                        DISCLAIMER                        #")
        print("#                                                          #")
        print("# The injection of a Beacon Request may interfere with the #")
        print("# operation of legitimate IEEE 802.15.4-based networks.    #")
        print("# The users of this tool are responsible for making sure   #")
        print("# that they are compliant with their local laws and that   #")
        print("# they have proper permission from the affected network    #")
        print("# owners.                                                  #")
        print("############################################################")
        answer = input("Are you sure that you want to proceed? [y/N] ")
        # Check the provided answer
        if answer == "y":
            print("You accepted responsibility for your actions")
        else:
            logging.info("Canceling the injection of a forged packet...")
            return
    elif pkt_type.lower() == "orphannotif":
        # Process some of the provided parameter values
        if srcextendedaddr is None:
            srcextendedaddr = DEFAULT_SRCEXTENDEDADDR
            logging.warning("Unspecified extended source address; defaulted "
                            "to \"{:016x}\"".format(srcextendedaddr))
        else:
            srcextendedaddr = int(srcextendedaddr, 16)
            if srcextendedaddr < 0 or srcextendedaddr.bit_length() > 64:
                raise ValueError("Invalid extended source address")
        # Forge the packet
        forged_pkt = orphannotif(mac_seqnum, srcextendedaddr)
        # Print a disclaimer
        print("############################################################")
        print("#                        DISCLAIMER                        #")
        print("#                                                          #")
        print("# The injection of an Orphan Notification may interfere    #")
        print("# with the operation of legitimate IEEE 802.15.4-based     #")
        print("# networks. The users of this tool are responsible for     #")
        print("# making sure that they are compliant with their local     #")
        print("# laws and that they have proper permission from the       #")
        print("# affected network owners.                                 #")
        print("############################################################")
        answer = input("Are you sure that you want to proceed? [y/N] ")
        # Check the provided answer
        if answer == "y":
            print("You accepted responsibility for your actions")
        else:
            logging.info("Canceling the injection of a forged packet...")
            return
    elif pkt_type.lower() == "rejoinreq":
        # Process some of the provided parameter values
        if panid is None:
            panid = DEFAULT_PANID
            logging.warning("Unspecified PAN ID; defaulted "
                            "to \"0x{:04x}\"".format(panid))
        else:
            panid = int(panid, 16)
            if panid < 0 or panid.bit_length() > 16:
                raise ValueError("Invalid PAN ID")
        if dstshortaddr is None:
            dstshortaddr = DEFAULT_DSTSHORTADDR
            logging.warning("Unspecified short destination address; "
                            "defaulted to \"0x{:04x}\"".format(dstshortaddr))
        else:
            dstshortaddr = int(dstshortaddr, 16)
            if dstshortaddr < 0 or dstshortaddr.bit_length() > 16:
                raise ValueError("Invalid short destination address")
        if srcshortaddr is None:
            srcshortaddr = DEFAULT_SRCSHORTADDR
            logging.warning("Unspecified short source address; "
                            "defaulted to \"0x{:04x}\"".format(srcshortaddr))
        else:
            srcshortaddr = int(srcshortaddr, 16)
            if srcshortaddr < 0 or srcshortaddr.bit_length() > 16:
                raise ValueError("Invalid short source address")
        if srcextendedaddr is None:
            srcextendedaddr = DEFAULT_SRCEXTENDEDADDR
            logging.warning("Unspecified extended source address; defaulted "
                            "to \"{:016x}\"".format(srcextendedaddr))
        else:
            srcextendedaddr = int(srcextendedaddr, 16)
            if srcextendedaddr < 0 or srcextendedaddr.bit_length() > 64:
                raise ValueError("Invalid extended source address")
        # Forge the packet
        forged_pkt = rejoinreq(mac_seqnum, panid, dstshortaddr, srcshortaddr,
                               nwk_seqnum, srcextendedaddr, devtype, powsrc,
                               rxidle)
        # Print a disclaimer
        print("############################################################")
        print("#                        DISCLAIMER                        #")
        print("#                                                          #")
        print("# The injection of an unsecured Rejoin Request may result  #")
        print("# in the disclosure of the network key that a legitimate   #")
        print("# Zigbee network is using and may also interfere with the  #")
        print("# operation of legitimate IEEE 802.15.4-based networks.    #")
        print("# The users of this tool are responsible for making sure   #")
        print("# that they are compliant with their local laws and that   #")
        print("# they have proper permission from the affected network    #")
        print("# owners.                                                  #")
        print("############################################################")
        answer = input("Are you sure that you want to proceed? [y/N] ")
        # Check the provided answer
        if answer == "y":
            print("You accepted responsibility for your actions")
        else:
            logging.info("Canceling the injection of a forged packet...")
            return
    else:
        raise ValueError("Unknown packet type \"{}\"".format(pkt_type))

    # Send the forged packet to an SDR over a UDP connection
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as tx_sock:
        tx_sock.sendto(bytes(forged_pkt), (ipaddr, portnum))
    logging.info("Sent the following packet over UDP: "
                 "{}".format(bytes(forged_pkt).hex()))
Example #44
0
    def update(self):
        """
            Called whenever we want to parse the RSS feed again.
            Updates metadata from the feed and gets new entries.
            Stores all the data in the respective json files.
        """

        if not self.active:
            # Don't try to update a feed that has been deactivated
            logging.warning('Feed "{name}" is deactivated, '
                            'so it could not be updated'.format(name=self.name))
            return

        # Try retrieving the feed
        feed = fp.parse(self.link, etag=self.etag, modified=self.modified)

        if feed.get('bozo', 0):
            # There's been an error in the parsing of the XML feed
            self.active = False
            raise exceptions.BozoException(self, feed)

        # Check if we got a redirect or an error
        status = feed.get('status', 500)

        if status == 304:
            # Not modified. Don't do anything else
            logging.info('Feed "{name}" not modified'.format(name=self.name))
            return

        elif 300 <= status and status < 400:
            # Consider redirects to another location
            logging.info('Feed "{name}" redirected from "{old}" to "{new}"'.\
                format(
                    name=self.name,
                    old=self.link,
                    new=feed.get('href', '')
                )
            )

            # If there's a new url, it will be updated just after this if

        elif 400 <= status and status < 500:
            # Has the feed disappeared?
            logging.warning(
                'Feed "{name}" lost with status {status}'.format(
                    name=self.name, status=status))
            self.active = False
            raise exceptions.LostFeedException(self, feed)

        elif status >= 500:
            # Server Exception. Notify it, but don't do anything else.
            # We'll try later
            logging.warning(
                'Feed "{name}" got status code {status}'.format(
                    name=self.name, status=status))
            return

        self.etag = feed.get('etag')
        self.modified = feed.get('modified')
        self.link = feed.get('href', self.link)
        self.language = feed.get('language')

        # Retrieve new entries, assign them an index and put them in the list
        self.save_entries(self.new_entries(feed))
        self.save_metadata()
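update() leans on feedparser's conditional-fetch support: passing the stored etag and modified values back to fp.parse() lets the server reply with status 304 when nothing changed. A minimal standalone sketch of that pattern, assuming the feedparser package (aliased as fp in the example above) and a hypothetical feed URL:

import logging
import feedparser as fp

etag, modified = None, None  # persisted between runs by the class above

def poll(url='https://example.org/feed.xml'):  # hypothetical URL
    global etag, modified
    feed = fp.parse(url, etag=etag, modified=modified)
    if feed.get('status', 500) == 304:
        logging.info('Feed not modified, nothing to do')
        return []
    # Remember the validators for the next poll
    etag = feed.get('etag')
    modified = feed.get('modified')
    return feed.entries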
Example #45
0
def Credentials():

    try:
        SurlEncryptedValue = os.environ['ssurl']
        SurlDecryptedValue = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SurlEncryptedValue))['Plaintext']
        
        SsgetsecEncryptedValue = os.environ['ssgetsec']
        SsgetsecDecryptedValue = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SsgetsecEncryptedValue))['Plaintext']

        SuserEncryptedValue = os.environ['suser']
        SuserDecryptedValue = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SuserEncryptedValue))['Plaintext']
       
        SpassEncryptedValue = os.environ['spass']
        SpassDecryptedValue = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SpassEncryptedValue))['Plaintext']
        
        SidEncryptedValue = os.environ['sid']
        SidDecryptedValue = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SidEncryptedValue))['Plaintext']
        
        SnsidEncryptedValue = os.environ['snsid']
        SnsidDecryptedValue = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SnsidEncryptedValue))['Plaintext']
        
        JeasessionEncryptedValue = os.environ['jeasession']
        JeaSessionName = boto3.client('kms').decrypt(CiphertextBlob=b64decode(JeasessionEncryptedValue))['Plaintext']
        
        SnowinstanceEncryptedValue = os.environ['snowinstance']
        SnowInstanceName = boto3.client('kms').decrypt(CiphertextBlob=b64decode(SnowinstanceEncryptedValue))['Plaintext']
        
        JumpboxServerEncryptedValue = os.environ['jumpserver']
        JumpboxServerIpAddress = boto3.client('kms').decrypt(CiphertextBlob=b64decode(JumpboxServerEncryptedValue))['Plaintext']
        
        headers = {"Content-Type": "application/x-www-form-urlencoded"}
        
        creds = {
                 "username": SuserDecryptedValue,
                 "password": SpassDecryptedValue,
                 "organization": "",
                 "domain": "ads"
                }

        # Fetching Token Number for Service Account
        ServiceAccounToken = requests.post(SurlDecryptedValue, data=creds, headers=headers, verify=False)      
        soup = BeautifulSoup(ServiceAccounToken.content)
        token = soup.find('token').string
        secret = {
                  "secretId": SidDecryptedValue,
                  "token": token
                 }
        # Fetching Service Account Credentials
        ServiceAccounCredentials = requests.post(SsgetsecDecryptedValue, data=secret, headers=headers, verify=False)
        
        soup = BeautifulSoup(ServiceAccounCredentials.content)
        ServiceAccountCredentialsOutput = soup.findAll("value")
            
        i = 0
        for user in ServiceAccountCredentialsOutput:
            if i == 0:
                i = i + 1
            elif i <= 2:
                
                StringValue = user.string
                
                if "svc" in StringValue:
                    ServerAccountUserName = StringValue
                else:
                    ServerAccountPassword = StringValue
                i = i + 1
        
        # Fetching Token Number for Snow Account
        SnowAccounToken = requests.post(SurlDecryptedValue, data=creds, headers=headers, verify=False)      
        soup = BeautifulSoup(SnowAccounToken.content)
        token = soup.find('token').string
        secret = {
                  "secretId": SnsidDecryptedValue,
                  "token": token
                 }
        # Fetching Snow Account Credentials
        SnowCredentials = requests.post(SsgetsecDecryptedValue, data=secret, headers=headers, verify=False)
        
        soup = BeautifulSoup(SnowCredentials.content)
        SnowCredentialsOutput = soup.findAll("value")
            
        i = 0
        for user in SnowCredentialsOutput:
            if i == 0:
                i = i + 1
            elif i <= 2:
                
                StringValue = user.string
                
                if "svc" in StringValue:
                    SnowUserName = StringValue
                else:
                    SnowPassword = StringValue
                i = i + 1
        
        return ServerAccountUserName, ServerAccountPassword, JeaSessionName, SnowInstanceName, SnowUserName, SnowPassword, JumpboxServerIpAddress

    except Exception as e:
        logging.warning("Message : Error at Credentials()...!" + str(e))
Example #46
0
def main():
    parser = argparse.ArgumentParser(description='py-kms: KMS Server Emulator',
                                     epilog="version: py-kms_2017-06-01")
    parser.add_argument(
        "ip",
        nargs="?",
        action="store",
        default="0.0.0.0",
        help=
        'The IP address to listen on. The default is \"0.0.0.0\" (all interfaces).',
        type=str)
    parser.add_argument(
        "port",
        nargs="?",
        action="store",
        default=1688,
        help='The network port to listen on. The default is \"1688\".',
        type=int)
    parser.add_argument(
        "-e",
        "--epid",
        dest="epid",
        default=None,
        help=
        'Use this flag to manually specify an ePID to use. If no ePID is specified, a random ePID will be generated.',
        type=str)
    parser.add_argument(
        "-l",
        "--lcid",
        dest="lcid",
        default=1033,
        help=
        'Use this flag to manually specify an LCID for use with randomly generated ePIDs. If an ePID is manually specified,\
this setting is ignored.',
        type=int)
    parser.add_argument(
        "-c",
        "--client-count",
        dest="CurrentClientCount",
        default=26,
        help=
        'Use this flag to specify the current client count. Default is 26. A number >25 is required to enable activation.',
        type=int)
    parser.add_argument(
        "-a",
        "--activation-interval",
        dest="VLActivationInterval",
        default=120,
        help=
        'Use this flag to specify the activation interval (in minutes). Default is 120 minutes (2 hours).',
        type=int)
    parser.add_argument(
        "-r",
        "--renewal-interval",
        dest="VLRenewalInterval",
        default=1440 * 7,
        help=
        'Use this flag to specify the renewal interval (in minutes). Default is 10080 minutes (7 days).',
        type=int)
    parser.add_argument(
        "-s",
        "--sqlite",
        dest="sqlite",
        action="store_const",
        const=True,
        default=False,
        help=
        'Use this flag to store request information from unique clients in an SQLite database.'
    )
    parser.add_argument(
        "-w",
        "--hwid",
        dest="hwid",
        action="store",
        default='364F463A8863D35F',
        help=
        'Use this flag to specify a HWID. The HWID must be an 16-character string of hex characters. \
The default is \"364F463A8863D35F\" or type \"random\" to auto generate the HWID.',
        type=str)
    parser.add_argument(
        "-v",
        "--loglevel",
        dest="loglevel",
        action="store",
        default="ERROR",
        choices=["CRITICAL", "ERROR", "WARNING", "INFO", "DEBUG"],
        help='Use this flag to set a Loglevel. The default is \"ERROR\".',
        type=str)
    parser.add_argument(
        "-f",
        "--logfile",
        dest="logfile",
        action="store",
        default=os.path.dirname(os.path.abspath(__file__)) +
        "/pykms_server.log",
        help=
        'Use this flag to set an output Logfile. The default is \"pykms_server.log\".',
        type=str)

    config.update(vars(parser.parse_args()))
    #Random HWID
    if config['hwid'] == "random":
        randomhwid = uuid.uuid4().hex
        config['hwid'] = randomhwid[:16]

    # Sanitize HWID
    try:
        config['hwid'] = binascii.a2b_hex(
            re.sub(r'[^0-9a-fA-F]', '', config['hwid'].strip('0x')))
        if len(binascii.b2a_hex(config['hwid'])) < 16:
            logging.error("HWID \"%s\" is invalid. Hex string is too short." %
                          binascii.b2a_hex(config['hwid']).upper())
            return
        elif len(binascii.b2a_hex(config['hwid'])) > 16:
            logging.error("HWID \"%s\" is invalid. Hex string is too long." %
                          binascii.b2a_hex(config['hwid']).upper())
            return
    except TypeError:
        logging.error("HWID \"%s\" is invalid. Odd-length hex string." %
                      binascii.b2a_hex(config['hwid']).upper())
        return

    logging.basicConfig(level=config['loglevel'],
                        format='%(asctime)s %(levelname)-8s %(message)s',
                        datefmt='%a, %d %b %Y %H:%M:%S',
                        filename=config['logfile'],
                        filemode='w')

    try:
        import sqlite3
        config['dbSupport'] = True
    except:
        logging.warning(
            "Module \"sqlite3\" is not installed, database support disabled.")
        config['dbSupport'] = False
    server = SocketServer.TCPServer((config['ip'], config['port']), kmsServer)
    server.timeout = 5
    logging.info("TCP server listening at %s on port %d." %
                 (config['ip'], config['port']))
    logging.info("HWID: %s" % binascii.b2a_hex(config['hwid']).upper())
    server.serve_forever()
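The HWID handling above strips non-hex characters and then requires exactly 16 hex digits (8 bytes). The same check as a small standalone sketch (the helper name is mine, not part of py-kms):

import binascii
import re

def normalize_hwid(hwid):
    # Drop anything that is not a hex digit, then demand 16 hex characters
    cleaned = re.sub(r'[^0-9a-fA-F]', '', hwid)
    if len(cleaned) != 16:
        raise ValueError('HWID must be 16 hex characters, got %d' % len(cleaned))
    return binascii.a2b_hex(cleaned)  # 8 raw bytes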
Example #47
0
import logging
import sys

# Fixing the level of severity
logging.basicConfig(level=logging.DEBUG)

# To disable the logging completely
# Method 1
# logging.disable(sys.maxint) # Python 2
# logging.disable(sys.maxsize)  # Python 3

# Method 2
logging.getLogger().disabled = True

logging.debug("This is a debug2")
logging.info("This is a info2")
logging.warning("This is a warning2")
logging.error("This is a error2")
logging.critical("This is a critical2")
print()

# ----------------------


def addition(n1, n2):
    logging.debug("addition function -start")
    return n1 + n2


addition(1, 2)
addition(1, 2)
addition(1, 2)
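Both disabling methods shown above can be reversed; a short sketch of turning logging back on:

# Undo Method 1: clear the global disabling threshold
logging.disable(logging.NOTSET)

# Undo Method 2: re-enable the root logger
logging.getLogger().disabled = False

logging.debug("Visible again once the configured level allows it")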
Example #48
0
    def __init__(self, logger_level=None,
                 archi_batch_size=None, time_len=None, total_num=None,
                 path_project=None,
                 path_npz=None,
                 path_cache=None,
                 path_origin_data=None):

        self.path_npz = path_npz
        self.path_cache = path_cache
        self.path_origin_data = path_origin_data
        self.path = path_project

        self.logger = logging.getLogger(__name__)
        if not logger_level:
            logger_level = logging.INFO
        self.logger.setLevel(logger_level)

        if not self.path:
            self.path = os.getcwd()

        if not self.path_origin_data:
            self.path_origin_data = self.path + "\\dataset\\Architecture_Energy\\"
        self.path_weather = self.path_origin_data + "\\param_weather.xlsx"
        self.path_archi = self.path_origin_data + "\\param_archi.xlsx"
        self.dir_energy = self.path_origin_data + "\\data_archi_energy"

        if not self.path_cache:
            self.path_cache = self.path + "\\datacache\\Archi_Energy\\"

        if not self.path_npz:
            self.path_npz = self.path + "\\datacache\\Archi_Energy\\_origin"

        # check whether the dataset-related directories exist
        if not os.path.exists(self.path_origin_data):
            logging.warning("no original energy data path")
            os.makedirs(self.path_origin_data)

        if not os.path.exists(self.path_cache):
            logging.warning("no formulated dataset path")
            os.makedirs(self.path_cache)

        if not os.path.exists(self.path_npz):
            logging.warning("no original .npz path")
            os.makedirs(self.path_npz)

        # check whether the dataset has already been prepared;
        # if not, generate the testing-case-level dataset
        if len(os.listdir(self.path_cache)) == 1:
            logging.warning("no formulated dataset")

            if not len(os.listdir(self.path_npz)):
                logging.warning("no original .npz files")

                if not len(os.listdir(self.path_origin_data)):
                    raise FileExistsError("no original energy data")

                logging.info("extract original .xlsx files to original .npz files")
                self.origin_mat2npz_AE()
                logging.info(".npz files has been generated !")

            if not archi_batch_size or not time_len or not total_num:
                raise KeyError("keys [archi_batch_num], [time_len] and [total_num] are needed for sampling")

            logging.info("sampling original .npz files to formulate raw data-set")
            self.dataset_prepare_AE(archi_batch_size, time_len, total_num)
        logging.info("data-set has been prepared !")

        self.training_set, self.testing_set = None, None
Example #49
0
import json
import logging
import os

import pandas as pd

from multi_vector_simulator.B0_data_input_json import convert_from_special_types_to_json
from multi_vector_simulator.E1_process_results import get_units_of_cost_matrix_entries
import multi_vector_simulator.F1_plotting as F1_plots

try:
    import multi_vector_simulator.F2_autoreport as autoreport

    AUTOREPORT = True
except ModuleNotFoundError:
    logging.warning("The reporting feature is disabled")
    AUTOREPORT = False

from multi_vector_simulator.utils.constants import (
    SIMULATION_SETTINGS,
    PATH_OUTPUT_FOLDER,
    OUTPUT_FOLDER,
    LOGFILE,
    PATHS_TO_PLOTS,
)

from multi_vector_simulator.utils.constants import (
    JSON_WITH_RESULTS,
    JSON_FILE_EXTENSION,
)
from multi_vector_simulator.utils.constants_json_strings import (
Example #50
0
def main(argv):

    global min_level
    global in_progress_issues
    dojo_url = ''
    dojo_engagement_id=0
    dojo_api_key=''
    cid = ''
    context_file = ''
    progress_file = ''
    config_file = ''
    config_url = ''
    mins = 0
    generate = ''
    port = 0
    detailed_output = True
    report_html = ''
    report_md = ''
    report_xml = ''
    report_json = ''
    target = ''
    zap_alpha = False
    info_unspecified = False
    ajax = False
    base_dir = ''
    zap_ip = 'localhost'
    zap_options = ''
    delay = 0
    timeout = 0
    pass_count = 0
    warn_count = 0
    fail_count = 0
    info_count = 0
    ignore_count = 0
    warn_inprog_count = 0
    fail_inprog_count = 0

    try:
        opts, args = getopt.getopt(argv, "t:c:u:g:m:n:r:J:w:x:l:hdaijp:sz:P:D:T:U:A:I:")
    except getopt.GetoptError as exc:
        logging.warning('Invalid option ' + exc.opt + ' : ' + exc.msg)
        usage()
        sys.exit(3)

    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit(0)
        elif opt == '-t':
            target = arg
            logging.debug('Target: ' + target)
        elif opt == '-c':
            config_file = arg
        elif opt == '-u':
            config_url = arg
        elif opt == '-g':
            generate = arg
        elif opt == '-d':
            logging.getLogger().setLevel(logging.DEBUG)
        elif opt == '-m':
            mins = int(arg)
        elif opt == '-P':
            port = int(arg)
        elif opt == '-D':
            delay = int(arg)
        elif opt == '-n':
            context_file = arg
        elif opt == '-p':
            progress_file = arg
        elif opt == '-r':
            report_html = arg
        elif opt == '-J':
            report_json = arg
        elif opt == '-w':
            report_md = arg
        elif opt == '-x':
            report_xml = arg
        elif opt == '-a':
            zap_alpha = True
        elif opt == '-i':
            info_unspecified = True
        elif opt == '-j':
            ajax = True
        elif opt == '-U':
            dojo_url = arg
        elif opt == '-A':
            dojo_api_key = arg
        elif opt == '-I':
            dojo_engagement_id = arg
        elif opt == '-l':
            try:
                min_level = zap_conf_lvls.index(arg)
            except ValueError:
                logging.warning('Level must be one of ' + str(zap_conf_lvls))
                usage()
                sys.exit(3)
        elif opt == '-z':
            zap_options = arg
        elif opt == '-s':
            detailed_output = False
        elif opt == '-T':
            timeout = int(arg)

    check_zap_client_version()

    # Check target supplied and ok
    if len(target) == 0:
        usage()
        sys.exit(3)

    if not (target.startswith('http://') or target.startswith('https://')):
        logging.warning('Target must start with \'http://\' or \'https://\'')
        usage()
        sys.exit(3)

    if running_in_docker():
        base_dir = '/zap/wrk/'
        if config_file or generate or report_html or report_xml or report_json or progress_file or context_file:
            # Check directory has been mounted
            if not os.path.exists(base_dir):
                logging.warning('A file based option has been specified but the directory \'/zap/wrk\' is not mounted ')
                usage()
                sys.exit(3)

    # Choose a random 'ephemeral' port and check it's available if it wasn't specified with -P option
    if port == 0:
        port = get_free_port()

    logging.debug('Using port: ' + str(port))

    if config_file:
        # load config file from filestore
        with open(base_dir + config_file) as f:
            try:
                load_config(f, config_dict, config_msg, out_of_scope_dict)
            except ValueError as e:
                logging.warning("Failed to load config file " + base_dir + config_file + " " + str(e))
                sys.exit(3)
    elif config_url:
        # load config file from url
        try:
            load_config(urlopen(config_url).read().decode('UTF-8'), config_dict, config_msg, out_of_scope_dict)
        except ValueError as e:
            logging.warning("Failed to read configs from " + config_url + " " + str(e))
            sys.exit(3)
        except:
            logging.warning('Failed to read configs from ' + config_url)
            sys.exit(3)

    if progress_file:
        # load progress file from filestore
        with open(base_dir + progress_file) as f:
            progress = json.load(f)
            # parse into something more useful...
            # in_prog_issues = map of vulnid -> {object with everything in}
            for issue in progress["issues"]:
                if issue["state"] == "inprogress":
                    in_progress_issues[issue["id"]] = issue

    if running_in_docker():
        try:
            params = [
                      '-config', 'spider.maxDuration=' + str(mins),
                      '-addonupdate',
                      '-addoninstall', 'pscanrulesBeta']  # In case we're running in the stable container

            if zap_alpha:
                params.append('-addoninstall')
                params.append('pscanrulesAlpha')

            add_zap_options(params, zap_options)

            start_zap(port, params)

        except OSError:
            logging.warning('Failed to start ZAP :(')
            sys.exit(3)

    else:
        # Not running in docker, so start one
        mount_dir = ''
        if context_file:
            mount_dir = os.path.dirname(os.path.abspath(context_file))

        params = [
                  '-config', 'spider.maxDuration=' + str(mins),
                  '-addonupdate',
                  '-addoninstall', 'pscanrulesBeta']  # In case we're running in the stable container

        if (zap_alpha):
            params.extend(['-addoninstall', 'pscanrulesAlpha'])

        add_zap_options(params, zap_options)

        try:
            cid = start_docker_zap('owasp/zap2docker-weekly', port, params, mount_dir)
            zap_ip = ipaddress_for_cid(cid)
            logging.debug('Docker ZAP IP Addr: ' + zap_ip)
        except OSError:
            logging.warning('Failed to start ZAP in docker :(')
            sys.exit(3)

    try:
        zap = ZAPv2(proxies={'http': 'http://' + zap_ip + ':' + str(port), 'https': 'http://' + zap_ip + ':' + str(port)})

        wait_for_zap_start(zap, timeout * 60)

        if context_file:
            # handle the context file, can't use base_dir as it might not have been set up
            res = zap.context.import_context('/zap/wrk/' + os.path.basename(context_file))
            if res.startswith("ZAP Error"):
                logging.error('Failed to load context file ' + context_file + ' : ' + res)

        zap_access_target(zap, target)

        if target.count('/') > 2:
            # The url can include a valid path, but always reset to spider the host
            target = target[0:target.index('/', 8)+1]

        time.sleep(200)

        # Spider target
        zap_spider(zap, target)

        if (ajax):
            zap_ajax_spider(zap, target, mins)

        if (delay):
            start_scan = datetime.now()
            while ((datetime.now() - start_scan).seconds < delay):
                time.sleep(5)
                logging.debug('Delay active scan ' + str(delay -(datetime.now() - start_scan).seconds) + ' seconds')

        if target.count('/') > 2:
            # The url can include a valid path, but always reset to scan the host
            target = target[0:target.index('/', 8)+1]

        # Set up the scan policy
        scan_policy = 'Default Policy'
        if config_dict:
            # They have supplied a config file, use this to define the ascan rules
            zap.ascan.enable_all_scanners(scanpolicyname=scan_policy)
            for scanner, state in config_dict.items():
                if state == 'IGNORE':
                    # Don't bother checking the result - this will fail for pscan rules
                    zap.ascan.set_scanner_alert_threshold(id=scanner, alertthreshold='OFF', scanpolicyname=scan_policy)

        zap_active_scan(zap, target, scan_policy)

        zap_wait_for_passive_scan(zap, timeout * 60)

        # Print out a count of the number of urls
        num_urls = len(zap.core.urls())
        print(zap.core.urls())
        if num_urls == 0:
            logging.warning('No URLs found - is the target URL accessible? Local services may not be accessible from the Docker container')
        else:
            if detailed_output:
                print('Total of ' + str(num_urls) + ' URLs')

            alert_dict = zap_get_alerts(zap, target, blacklist, out_of_scope_dict)

            all_ascan_rules = zap.ascan.scanners('Default Policy')
            all_pscan_rules = zap.pscan.scanners
            all_dict = {}
            for rule in all_pscan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                all_dict[plugin_id] = rule.get('name') + ' - Passive/' + rule.get('quality')
            for rule in all_ascan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                all_dict[plugin_id] = rule.get('name') + ' - Active/' + rule.get('quality')

            if generate:
                # Create the config file
                with open(base_dir + generate, 'w') as f:
                    f.write('# zap-full-scan rule configuration file\n')
                    f.write('# Change WARN to IGNORE to ignore rule or FAIL to fail if rule matches\n')
                    f.write('# Active scan rules set to IGNORE will not be run which will speed up the scan\n')
                    f.write('# Only the rule identifiers are used - the names are just for info\n')
                    f.write('# You can add your own messages to each rule by appending them after a tab on each line.\n')
                    for key, rule in sorted(all_dict.iteritems()):
                        f.write(key + '\tWARN\t(' + rule + ')\n')

            # print out the passing rules
            pass_dict = {}
            for rule in all_pscan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                if (not alert_dict.has_key(plugin_id)):
                    pass_dict[plugin_id] = rule.get('name')
            for rule in all_ascan_rules:
                plugin_id = rule.get('id')
                if plugin_id in blacklist:
                    continue
                if not alert_dict.has_key(plugin_id) and not(config_dict.has_key(plugin_id) and config_dict[plugin_id] == 'IGNORE'):
                    pass_dict[plugin_id] = rule.get('name')

            if min_level == zap_conf_lvls.index("PASS") and detailed_output:
                for key, rule in sorted(pass_dict.iteritems()):
                    print('PASS: ' + rule + ' [' + key + ']')

            pass_count = len(pass_dict)

            if detailed_output:
                # print out the ignored ascan rules(there will be no alerts for these as they were not run)
                for rule in all_ascan_rules:
                    plugin_id = rule.get('id')
                    if plugin_id in blacklist:
                        continue
                    if config_dict.has_key(plugin_id) and config_dict[plugin_id] == 'IGNORE':
                        print('SKIP: ' + rule.get('name') + ' [' + plugin_id + ']')

            # print out the ignored rules
            ignore_count, not_used = print_rules(alert_dict, 'IGNORE', config_dict, config_msg, min_level,
                inc_ignore_rules, True, detailed_output, {})

            # print out the info rules
            info_count, not_used = print_rules(alert_dict, 'INFO', config_dict, config_msg, min_level,
                inc_info_rules, info_unspecified, detailed_output, in_progress_issues)

            # print out the warning rules
            warn_count, warn_inprog_count = print_rules(alert_dict, 'WARN', config_dict, config_msg, min_level,
                inc_warn_rules, not info_unspecified, detailed_output, in_progress_issues)

            # print out the failing rules
            fail_count, fail_inprog_count = print_rules(alert_dict, 'FAIL', config_dict, config_msg, min_level,
                inc_fail_rules, True, detailed_output, in_progress_issues)

            if report_html:
                # Save the report
                write_report(base_dir + report_html, zap.core.htmlreport())

            if report_json:
                # Save the report
                write_report(base_dir + report_json, zap.core.jsonreport())

            if report_md:
                # Save the report
                write_report(base_dir + report_md, zap.core.mdreport())

            if report_xml:
                # Save the report
                write_report(base_dir + report_xml, zap.core.xmlreport())

            print('FAIL-NEW: ' + str(fail_count) + '\tFAIL-INPROG: ' + str(fail_inprog_count) +
                '\tWARN-NEW: ' + str(warn_count) + '\tWARN-INPROG: ' + str(warn_inprog_count) +
                '\tINFO: ' + str(info_count) + '\tIGNORE: ' + str(ignore_count) + '\tPASS: ' + str(pass_count))

        write_report('result.xml', zap.core.xmlreport())
        time.sleep(10)

        # command injection problem, I reckon
        now = datetime.now()
        commit_to_dojo = 'curl --request POST --url {0}/api/v1/importscan/ --header \'authorization: ApiKey {1}\' --header \'cache-control: no-cache\' --header \'content-type: multipart/form-data; boundary=----WebKitFormBoundary7MA4YWxkTrZu0gW\' --form minimum_severity=Info --form scan_date={3} --form verified=False --form [email protected] --form tags=Test_automation --form active=True --form engagement=/api/v1/engagements/{2}/ --form \'scan_type=ZAP Scan\''.format(dojo_url, dojo_api_key, dojo_engagement_id, now.strftime("%Y-%m-%d") )
        foo = os.system(commit_to_dojo)
        print(os.system('ifconfig'))
        print(foo)

        # Stop ZAP
        zap.core.shutdown()


    except IOError as e:
        if hasattr(e, 'args') and len(e.args) > 1:
            errno, strerror = e
            print("ERROR " + str(strerror))
            logging.warning('I/O error(' + str(errno) + '): ' + str(strerror))
        else:
            print("ERROR %s" % e)
            logging.warning('I/O error: ' + str(e))
        dump_log_file(cid)

    except:
        print("ERROR " + str(sys.exc_info()[0]))
        logging.warning('Unexpected error: ' + str(sys.exc_info()[0]))
        dump_log_file(cid)

    if not running_in_docker():
        stop_docker(cid)

    if fail_count > 0:
        sys.exit(1)
    elif warn_count > 0:
        sys.exit(2)
    elif pass_count > 0:
        sys.exit(0)
    else:
        sys.exit(3)
Example #51
0
def addPopData(site, atvk_id, name, pop):
    
    source_url = 'http://data.csb.gov.lv/pxweb/en/Sociala/Sociala__ikgad__iedz__iedzskaits'
    #population record date
    pop_day = 1
    pop_mon = 1
    pop_year = 2017
    #data access date
    access_day = 11
    access_mon = 9
    access_year = 2017
    
    sparql = "SELECT ?item WHERE { ?item wdt:P1115 '%s' . } LIMIT 1" % (atvk_id, )
    wd_pages = pagegenerators.WikidataSPARQLPageGenerator(sparql, site=wikidata)
    wd_pages = list(wd_pages)
    ##pprint( wd_pages )
    repo = site ##data repo
    
    for wd_page in wd_pages:

        if wd_page.exists():
            dictionary = wd_page.get()
            ## pprint( dictionary )
            ###if 'etwiki' in wd_page.sitelinks:
            pprint( wd_page )

            if not existingClaimFromYear(wd_page, pop_year, pop_mon, pop_day):
                time.sleep(10)
                
                population_claim = pywikibot.Claim(repo, 'P1082')
                population_claim.setTarget(pywikibot.WbQuantity(amount=pop)) #, error=1
                pywikibot.output('Adding %s --> %s' % (population_claim.getID(), population_claim.getTarget()))
                wd_page.addClaim(population_claim)
                    
                #time qualifier
                qualifier = pywikibot.Claim(repo, 'P585')
                pop_date = pywikibot.WbTime(year=pop_year, month=pop_mon, day=pop_day, precision='day')
                qualifier.setTarget(pop_date)
                population_claim.addQualifier(qualifier)

                #method qualifier       "rahvastikubilanss"
                qualifier = pywikibot.Claim(repo, 'P459')
                method = pywikibot.ItemPage(repo, 'Q15911027')
                qualifier.setTarget(method)
                population_claim.addQualifier(qualifier)
                                          
                #source as wiki page:   
                sourceWiki = pywikibot.Claim(repo, 'P248')
                sourceWiki.setTarget(pywikibot.ItemPage(repo, 'Q39420022'))              
                    
                #url as source
                source = pywikibot.Claim(repo, 'P854')
                source.setTarget(source_url)
                #vaadatud
                accessed = pywikibot.Claim(repo, 'P813')
                accessed_date = pywikibot.WbTime(year=access_year, month=access_mon, day=access_day, precision='day')
                accessed.setTarget(accessed_date)    
                        
                population_claim.addSources([sourceWiki, source, accessed])
                ## population_claim.addSources([source, accessed])

            else:
                print ("Population claim already exists "
                            "on %s for year %d, skipping") % (wd_page.title(), pop_year)
                logging.info ("Population claim already exists on %s" % (wd_page.title()))             


        else:
            print ('ERROR: NO DATA PAGE')
            logging.warning('[[%s]]: no data page in Wikidata' % (wd_page.title() ))
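addPopData() expects a Wikidata data repository plus an ATVK code, a name, and a population figure, and it relies on a module-level wikidata site object inside the SPARQL generator. A hedged usage sketch with placeholder values:

import pywikibot

wikidata = pywikibot.Site('wikidata', 'wikidata')  # assumed module-level site
repo = wikidata.data_repository()

# The ATVK code, name, and population below are placeholders, not real data
addPopData(repo, '0010000', 'Example municipality', 12345)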
Example #52
0
 async def __call__(self, request):
     kw = None
     # If there are parameters that need handling
     if self._has_var_kw_arg or self._has_named_kw_args or self._required_kw_args:
         if request.method == 'POST':
             if not request.content_type:
                 return web.HTTPBadRequest('Missing Content-Type.')
             ct = request.content_type.lower()
             if ct.startswith('application/json'):
                 params = await request.json()
                 if not isinstance(params, dict):
                     return web.HTTPBadRequest('JSON body must be object.')
                 kw = params
             elif ct.startswith('application/x-www-form-urlencoded'
                                ) or ct.startswith('multipart/form-data'):
                 params = await request.post()
                 kw = dict(**params)
             else:
                 return web.HTTPBadRequest('Unsupported Content-Type: %s' %
                                           request.content_type)
         if request.method == 'GET':
             qs = request.query_string
             if qs:
                 kw = dict()
                 '''
                 # Parse the key-value pairs after '?' in the URL and store them in request_content
                 qs = 'first=f,s&second=s'
                 parse.parse_qs(qs, True).items()
                 >>>dict([('first',['f,s']), ('second',['s'])])
                 '''
                 for k, v in parse.parse_qs(qs, True).items():
                     kw[k] = v[0]
     if kw is None:
         '''
         # An empty kw means no parameters were extracted from the request, or the URL handler takes no parameters
         def hello(request):
                 text = '<h1>hello, %s!</h1>' % request.match_info['name']
                 return web.Response()
         app.router.add_route('GET', '/hello/{name}', hello)
         '''
         '''
             if not self._has_var_kw_arg and not self._has_kw_arg and not self._required_kw_args:
             # When the URL handler takes no parameters, set request.match_info to an empty dict to avoid call errors
             request_content = dict()
         '''
         kw = dict(**request.match_info)
     else:
         if not self._has_var_kw_arg and self._named_kw_args:
             # remove all unamed kw:
             copy = dict()
             for name in self._named_kw_args:
                  # Understand this step clearly to avoid mistakes
                 if name in kw:
                     copy[name] = kw[name]
             kw = copy
         # check named arg: check whether any keyword argument names duplicate keys in match_info
         for k, v in request.match_info.items():
             if k in kw:
                 logging.warning(
                     'Duplicate arg name in named arg and kw args: %s' % k)
             kw[k] = v
     if self._has_request_arg:
         kw['request'] = request
     # check required kw: check whether all required keyword arguments were provided
     if self._required_kw_args:
         for name in self._required_kw_args:
             if not name in kw:
                 return web.HTTPBadRequest('Missing argument: %s' % name)
     # All of the code above collects the arguments for the handler call
     logging.info('call with args: %s' % str(kw))
     try:
         r = await self._func(**kw)
         return r
     except APIError as e:
         return dict(error=e.error, data=e.data, message=e.message)
Example #53
0
def prepare_timit(
    corpus_dir: Pathlike,
    output_dir: Optional[Pathlike] = None,
    num_phones: int = 48,
    num_jobs: int = 1,
) -> Dict[str, Dict[str, Union[RecordingSet, SupervisionSet]]]:
    """
    Returns the manifests which consist of the Recordings and Supervisions.
    :param corpus_dir: Pathlike, the path of the data dir.
    :param output_dir: Pathlike, the path where to write and save the manifests.
    :param num_phones: int=48, the number of phones (60, 48 or 39) for modeling and 48 is regarded as the default value.
    :return: a Dict whose key is the dataset part, and the value is Dicts with the keys 'audio' and 'supervisions'.
    """
    corpus_dir = Path(corpus_dir)
    assert corpus_dir.is_dir(), f"No such directory: {corpus_dir}"

    if output_dir is not None:
        output_dir = Path(output_dir)
        output_dir.mkdir(parents=True, exist_ok=True)

    manifests = defaultdict(dict)
    dataset_parts = ["TRAIN", "DEV", "TEST"]

    phones_dict = {}

    if num_phones in [60, 48, 39]:
        phones_dict = get_phonemes(num_phones)
    else:
        raise ValueError("The value of num_phones must be in [60, 48, 39].")

    dev_spks, test_spks = get_speakers()

    with ThreadPoolExecutor(num_jobs) as ex:
        for part in dataset_parts:
            wav_files = []

            if part == "TRAIN":
                print("starting....")
                wav_files = glob.glob(str(corpus_dir) + "/TRAIN/*/*/*.WAV")
                # filter the SA (dialect sentences)
                wav_files = list(
                    filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
                )
            elif part == "DEV":
                wav_files = glob.glob(str(corpus_dir) + "/TEST/*/*/*.WAV")
                # filter the SA (dialect sentences)
                wav_files = list(
                    filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
                )
                wav_files = list(
                    filter(lambda x: x.split("/")[-2].lower() in dev_spks, wav_files)
                )
            else:
                wav_files = glob.glob(str(corpus_dir) + "/TEST/*/*/*.WAV")
                # filter the SA (dialect sentences)
                wav_files = list(
                    filter(lambda x: x.split("/")[-1][:2] != "SA", wav_files)
                )
                wav_files = list(
                    filter(lambda x: x.split("/")[-2].lower() in test_spks, wav_files)
                )

            logging.debug(f"{part} dataset manifest generation.")
            recordings = []
            supervisions = []

            for wav_file in tqdm(wav_files):
                items = str(wav_file).strip().split("/")
                idx = items[-2] + "-" + items[-1][:-4]
                speaker = items[-2]
                transcript_file = Path(wav_file).with_suffix(".PHN")
                if not Path(wav_file).is_file():
                    logging.warning(f"No such file: {wav_file}")
                    continue
                if not Path(transcript_file).is_file():
                    logging.warning(f"No transcript: {transcript_file}")
                    continue
                text = []
                with open(transcript_file, "r") as f:
                    lines = f.readlines()
                    for line in lines:
                        phone = line.rstrip("\n").split(" ")[-1]
                        if num_phones != 60:
                            phone = phones_dict[str(phone)]
                        text.append(phone)

                    text = " ".join(text).replace("h#", "sil")

                recording = Recording.from_file(path=wav_file, recording_id=idx)
                recordings.append(recording)
                segment = SupervisionSegment(
                    id=idx,
                    recording_id=idx,
                    start=0.0,
                    duration=recording.duration,
                    channel=0,
                    language="English",
                    speaker=speaker,
                    text=text.strip(),
                )

                supervisions.append(segment)

            recording_set = RecordingSet.from_recordings(recordings)
            supervision_set = SupervisionSet.from_segments(supervisions)
            validate_recordings_and_supervisions(recording_set, supervision_set)

            if output_dir is not None:
                supervision_set.to_json(output_dir / f"supervisions_{part}.json")
                recording_set.to_json(output_dir / f"recordings_{part}.json")

            manifests[part] = {
                "recordings": recording_set,
                "supervisions": supervision_set,
            }

    return manifests
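A brief usage sketch for the recipe above; the corpus location is a placeholder and TIMIT itself must already be on disk:

manifests = prepare_timit(
    corpus_dir="/path/to/TIMIT",   # hypothetical corpus location
    output_dir="data/manifests",   # where the JSON manifests are written
    num_phones=48,
    num_jobs=4,
)
train_recordings = manifests["TRAIN"]["recordings"]
train_supervisions = manifests["TRAIN"]["supervisions"]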
Example #54
0
def application(env, start_response):
    # The values passed to start_response become the response headers
    # The return value of application() becomes the response body
    # See the notes on WSGI support in the 002 folder for how application() is invoked
    start_response('200 OK', [('Content-Type', 'text/html;charset=utf-8')])

    # The environ dict passed in from web_server
    file_name = env['PATH_INFO']
    # file_name = "/index.py"
    '''
    if file_name == "/index.py":
        return index()
    elif file_name == "/center.py":
        return center()
    else:
        return 'Hello World! I love you, China....'
    '''

    # Add logging here: whenever a browser makes a request, record the access information
    # Log format: timestamp --- current file name [current line number] --- level name --- log message
    LOG_FORMAT = "%(asctime)s---%(filename)s[line:%(lineno)d]---%(levelname)s: %(message)s"
    LOG_FH = [
        logging.FileHandler(filename='./log_information.log',
                            mode='a',
                            encoding='utf-8')
    ]
    logging.basicConfig(
        # filename='logging_record_information_1.log',
        level=logging.INFO,
        format=LOG_FORMAT,
        handlers=LOG_FH)
    logging.info("浏览器访问的网址名:%s" % file_name)

    # The dynamically requested xxx.py may not exist, so wrap the dispatch in a try statement
    try:
        # func = URL_FUNC_DICT[file_name]
        # return func()
        # {
        #   "/index.py": index,
        #   "/center.py": center,
        # }
        # The urls stored above map the known xxx.py file_names to handlers, e.g. {"/index.py": index}
        # Each url in the dict was supplied manually as a decorator argument; it is matched against
        # the incoming file_name, and a match yields the regex result ret
        # If a stored url matches the file_name from env['PATH_INFO'] passed in by web_server,
        # the handler is executed; otherwise we fall through to the else branch
        for url, func in URL_FUNC_DICT.items():
            ret = re.match(url, file_name)
            if ret:
                # Pass the regex result in; handlers that need ret use it, the others ignore it
                # index and center do not use it but still receive it; add_focus does use it
                return func(ret)
        # If no handler matched the request, execute the statements below
        else:
            # Log that there is no handler function for the request
            logging.warning("No handler function for the requested URL (%s)..." % file_name)
            response = "No handler function for the requested URL (%s)..." % file_name
            return response

    # Handle all other errors as follows, returning the reason for the error
    except Exception as ret:
        # start_response already returned the headers above, so the browser still sees status 200;
        # only the body returned below differs
        response = "------The requested dynamic page was not found, please enter a valid xxx.py-----Exception: %s" % ret
        return response
Example #55
0
def disable_tls_verification():
    os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
    requests.packages.urllib3.disable_warnings(category=urllib3.exceptions.InsecureRequestWarning)
    logging.warning('Disabled TLS verification')
    OpenshiftHttp.verify = False
Example #56
0
def corpus_bleu(sys_stream,
                ref_streams,
                smooth='exp',
                smooth_floor=0.0,
                force=False,
                lowercase=False,
                tokenize=DEFAULT_TOKENIZER,
                use_effective_order=False) -> BLEU:
    """Produces BLEU scores along with its sufficient statistics from a source against one or more references.
    :param sys_stream: The system stream (a sequence of segments)
    :param ref_streams: A list of one or more reference streams (each a sequence of segments)
    :param smooth: The smoothing method to use
    :param smooth_floor: For 'floor' smoothing, the floor to use
    :param force: Ignore data that looks already tokenized
    :param lowercase: Lowercase the data
    :param tokenize: The tokenizer to use
    :return: a BLEU object containing everything you'd want
    """
    # detokenize
    sys_stream = [' '.join(tokens) for tokens in sys_stream]
    ref_streams = [' '.join(tokens) for tokens in ref_streams]
    ref_streams = [ref_streams]

    sys_len = 0
    ref_len = 0

    correct = [0 for n in range(NGRAM_ORDER)]  #[0,0,0,0]
    total = [0 for n in range(NGRAM_ORDER)]  #[0,0,0,0]

    # look for already-tokenized sentences
    tokenized_count = 0

    fhs = [sys_stream] + ref_streams
    for lines in zip_longest(*fhs):
        if None in lines:
            raise EOFError(
                "Source and reference streams have different lengths!")

        if lowercase:
            lines = [x.lower() for x in lines]

        if not (force
                or tokenize == 'none') and lines[0].rstrip().endswith(' .'):
            tokenized_count += 1

            if tokenized_count == 100:
                logging.warning(
                    'That\'s 100 lines that end in a tokenized period (\'.\')')
                logging.warning(
                    'It looks like you forgot to detokenize your test data, which may hurt your score.'
                )
                logging.warning(
                    'If you insist your data is detokenized, or don\'t care, you can suppress this message with \'--force\'.'
                )

        output, *refs = [TOKENIZERS[tokenize](x.rstrip()) for x in lines]

        ref_ngrams, closest_diff, closest_len = ref_stats(output, refs)

        sys_len += len(output.split())
        ref_len += closest_len

        sys_ngrams = extract_ngrams(output)
        for ngram in sys_ngrams.keys():
            n = len(ngram.split())
            correct[n - 1] += min(sys_ngrams[ngram], ref_ngrams.get(ngram, 0))
            total[n - 1] += sys_ngrams[ngram]

    return compute_bleu(correct, total, sys_len, ref_len, smooth, smooth_floor,
                        use_effective_order)
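A short usage sketch for this corpus_bleu variant, which takes pre-tokenized segments (token lists) for the system output and a single reference stream; the score attribute is assumed to match sacrebleu's BLEU namedtuple:

sys_stream = [["the", "cat", "sat", "on", "the", "mat"]]  # hypothetical system output
ref_stream = [["the", "cat", "is", "on", "the", "mat"]]   # hypothetical reference

bleu = corpus_bleu(sys_stream, ref_stream, use_effective_order=True)
print(bleu.score)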
Example #57
0
def stage0(task_path, result_path, temp_path=None, consume_task_folder=False):
    config = worker_config()
    cgs = ControlGroupSystem()
    task = KolejkaTask(task_path)
    if not task.id:
        task.id = uuid.uuid4().hex
        logging.warning('Assigned id {} to the task'.format(task.id))
    if not task.image:
        logging.error('Task does not define system image')
        sys.exit(1)
    if not task.args:
        logging.error('Task does not define args')
        sys.exit(1)
    if not task.files.is_local:
        logging.error('Task contains non-local files')
        sys.exit(1)
    limits = KolejkaLimits()
    limits.cpus = config.cpus
    limits.memory = config.memory
    limits.swap = config.swap
    limits.pids = config.pids
    limits.storage = config.storage
    limits.image = config.image
    limits.workspace = config.workspace
    limits.time = config.time
    limits.network = config.network
    limits.gpus = config.gpus
    task.limits.update(limits)

    docker_task = 'kolejka_worker_{}'.format(task.id)

    docker_cleanup = [
        ['docker', 'kill', docker_task],
        ['docker', 'rm', docker_task],
    ]

    with tempfile.TemporaryDirectory(dir=temp_path) as jailed_path:
        #TODO jailed_path size remains unlimited?
        logging.debug('Using {} as temporary directory'.format(jailed_path))
        jailed_task_path = os.path.join(jailed_path, 'task')
        os.makedirs(jailed_task_path, exist_ok=True)
        jailed_result_path = os.path.join(jailed_path, 'result')
        os.makedirs(jailed_result_path, exist_ok=True)

        jailed = KolejkaTask(jailed_task_path)
        jailed.load(task.dump())
        jailed.files.clear()
        volumes = list()
        check_python_volume()
        if os.path.exists(OBSERVER_SOCKET):
            volumes.append((OBSERVER_SOCKET, OBSERVER_SOCKET, 'rw'))
        else:
            logging.warning('Observer is not running.')
        volumes.append(
            (jailed_result_path, os.path.join(WORKER_DIRECTORY,
                                              'result'), 'rw'))
        for key, val in task.files.items():
            if key != TASK_SPEC:
                src_path = os.path.join(task.path, val.path)
                dst_path = os.path.join(jailed_path, 'task', key)
                os.makedirs(os.path.dirname(dst_path), exist_ok=True)
                if consume_task_folder:
                    shutil.move(src_path, dst_path)
                else:
                    shutil.copy(src_path, dst_path)
                jailed.files.add(key)
        jailed.files.add(TASK_SPEC)
        #jailed.limits = KolejkaLimits() #TODO: Task is limited by docker, no need to limit it again?
        jailed.commit()
        volumes.append((jailed.path, os.path.join(WORKER_DIRECTORY,
                                                  'task'), 'rw'))
        if consume_task_folder:
            try:
                shutil.rmtree(task_path)
            except OSError:
                logging.warning('Failed to remove {}'.format(task_path))
        for spath in [os.path.dirname(__file__)]:
            stage1 = os.path.join(spath, 'stage1.sh')
            if os.path.isfile(stage1):
                volumes.append(
                    (stage1, os.path.join(WORKER_DIRECTORY,
                                          'stage1.sh'), 'ro'))
                break
        for spath in [os.path.dirname(__file__)]:
            stage2 = os.path.join(spath, 'stage2.py')
            if os.path.isfile(stage2):
                volumes.append(
                    (stage2, os.path.join(WORKER_DIRECTORY,
                                          'stage2.py'), 'ro'))
                break

        docker_call = ['docker', 'run']
        docker_call += ['--detach']
        docker_call += ['--name', docker_task]
        docker_call += [
            '--entrypoint',
            os.path.join(WORKER_DIRECTORY, 'stage1.sh')
        ]
        for key, val in task.environment.items():
            docker_call += ['--env', '{}={}'.format(key, val)]
        docker_call += ['--hostname', WORKER_HOSTNAME]
        docker_call += ['--init']
        if task.limits.cpus is not None:
            docker_call += [
                '--cpuset-cpus', ','.join([
                    str(c) for c in cgs.limited_cpuset(cgs.full_cpuset(
                    ), task.limits.cpus, task.limits.cpus_offset)
                ])
            ]

        if task.limits.gpus is not None and task.limits.gpus > 0:
            check_gpu_runtime_availability()
            gpus = ','.join(
                map(
                    str,
                    limited_gpuset(full_gpuset(), task.limits.gpus,
                                   task.limits.gpus_offset)))
            docker_call += [
                '--runtime=nvidia', '--shm-size=1g', '--gpus',
                f'"device={gpus}"'
            ]

        if task.limits.memory is not None:
            docker_call += ['--memory', str(task.limits.memory)]
            if task.limits.swap is not None:
                docker_call += [
                    '--memory-swap',
                    str(task.limits.memory + task.limits.swap)
                ]
        if task.limits.storage is not None:
            docker_info_run = subprocess.run(
                ['docker', 'system', 'info', '--format', '{{json .Driver}}'],
                stdout=subprocess.PIPE,
                check=True)
            storage_driver = str(
                json.loads(str(docker_info_run.stdout, 'utf-8')))
            if storage_driver == 'overlay2':
                docker_info_run = subprocess.run([
                    'docker', 'system', 'info', '--format',
                    '{{json .DriverStatus}}'
                ],
                                                 stdout=subprocess.PIPE,
                                                 check=True)
                storage_fs = dict(
                    json.loads(str(docker_info_run.stdout,
                                   'utf-8')))['Backing Filesystem']
                if storage_fs in ['xfs']:
                    storage_limit = task.limits.storage
                    docker_call += [
                        '--storage-opt', 'size=' + str(storage_limit)
                    ]
                else:
                    logging.warning(
                        "Storage limit on {} ({}) is not supported".format(
                            storage_driver, storage_fs))
            else:
                logging.warning("Storage limit on {} is not supported".format(
                    storage_driver))
        if task.limits.network is not None:
            if not task.limits.network:
                docker_call += ['--network=none']
        docker_call += ['--cap-add', 'SYS_NICE']
        if task.limits.pids is not None:
            docker_call += ['--pids-limit', str(task.limits.pids)]
        if task.limits.time is not None:
            docker_call += [
                '--stop-timeout',
                str(int(math.ceil(task.limits.time.total_seconds())))
            ]
        docker_call += [
            '--volume',
            '{}:{}:{}'.format(WORKER_PYTHON_VOLUME,
                              os.path.join(WORKER_DIRECTORY, 'python3'), 'ro')
        ]
        for v in volumes:
            docker_call += [
                '--volume', '{}:{}:{}'.format(os.path.realpath(v[0]), v[1],
                                              v[2])
            ]
        docker_call += ['--workdir', WORKER_DIRECTORY]
        docker_image = task.image
        docker_call += [docker_image]
        docker_call += ['--consume']
        if config.debug:
            docker_call += ['--debug']
        if config.verbose:
            docker_call += ['--verbose']
        docker_call += [os.path.join(WORKER_DIRECTORY, 'task')]
        docker_call += [os.path.join(WORKER_DIRECTORY, 'result')]
        logging.debug('Docker call : {}'.format(docker_call))

        pull_image = config.pull
        if not pull_image:
            docker_inspect_run = subprocess.run(
                ['docker', 'image', 'inspect', docker_image],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.STDOUT)
            if docker_inspect_run.returncode != 0:
                pull_image = True
        if pull_image:
            subprocess.run(['docker', 'pull', docker_image], check=True)

        for docker_clean in docker_cleanup:
            silent_call(docker_clean)

        if os.path.exists(result_path):
            shutil.rmtree(result_path)
        os.makedirs(result_path, exist_ok=True)
        result = KolejkaResult(result_path)
        result.id = task.id
        result.limits = task.limits
        result.stdout = task.stdout
        result.stderr = task.stderr

        start_time = datetime.datetime.now()
        docker_run = subprocess.run(docker_call, stdout=subprocess.PIPE)
        cid = str(docker_run.stdout, 'utf-8').strip()
        logging.info('Started container {}'.format(cid))

        try:
            if task.limits.gpus is not None and task.limits.gpus > 0:
                result.stats.update(
                    gpu_stats(gpus=limited_gpuset(full_gpuset(
                    ), task.limits.gpus, task.limits.gpus_offset)))
        except Exception:
            pass  # GPU stats are best-effort; ignore failures here
        time.sleep(0.1)

        while True:
            try:
                docker_state_run = subprocess.run(
                    ['docker', 'inspect', '--format', '{{json .State}}', cid],
                    stdout=subprocess.PIPE)
                state = json.loads(str(docker_state_run.stdout, 'utf-8'))
            except Exception:
                break  # container can no longer be inspected; assume it is gone
            try:
                result.stats.update(cgs.name_stats(cid))

                if task.limits.gpus is not None and task.limits.gpus > 0:
                    result.stats.update(
                        gpu_stats(gpus=limited_gpuset(full_gpuset(
                        ), task.limits.gpus, task.limits.gpus_offset)))
            except Exception:
                pass  # stats collection is best-effort
            time.sleep(0.1)
            if not state['Running']:
                result.result = state['ExitCode']
                try:
                    result.stats.time = dateutil.parser.parse(
                        state['FinishedAt']) - dateutil.parser.parse(
                            state['StartedAt'])
                except Exception:
                    result.stats.time = None
                break
            if task.limits.time is not None and datetime.datetime.now(
            ) - start_time > task.limits.time + datetime.timedelta(seconds=2):
                subprocess.run(['docker', 'kill', docker_task])
        subprocess.run(['docker', 'logs', cid], stdout=subprocess.PIPE)
        try:
            summary = KolejkaResult(jailed_result_path)
            result.stats.update(summary.stats)
        except Exception:
            pass  # no summary was produced inside the container

        stop_time = datetime.datetime.now()
        if result.stats.time is None:
            result.stats.time = stop_time - start_time
        result.stats.pids.usage = None
        result.stats.memory.usage = None
        result.stats.memory.swap = None

        for dirpath, dirnames, filenames in os.walk(jailed_result_path):
            for filename in filenames:
                abspath = os.path.join(dirpath, filename)
                realpath = os.path.realpath(abspath)
                if realpath.startswith(
                        os.path.realpath(jailed_result_path) + '/'):
                    relpath = abspath[len(jailed_result_path) + 1:]
                    if relpath != RESULT_SPEC:
                        destpath = os.path.join(result.path, relpath)
                        os.makedirs(os.path.dirname(destpath), exist_ok=True)
                        shutil.move(realpath, destpath)
                        os.chmod(destpath, 0o640)
                        result.files.add(relpath)
        result.commit()
        os.chmod(result.spec_path, 0o640)

        for docker_clean in docker_cleanup:
            silent_call(docker_clean)
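A hedged sketch of a driver for the function above, not part of the original example: the import path kolejka.worker.stage0 and the command-line flags are assumptions, and the task/result paths are illustrative only.

import argparse
import logging

from kolejka.worker.stage0 import stage0   # assumed module path

if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    parser = argparse.ArgumentParser(description='Run a single Kolejka task in Docker')
    parser.add_argument('task_path', help='directory containing the task specification')
    parser.add_argument('result_path', help='directory to write the result into')
    parser.add_argument('--temp-path', default=None, help='base directory for the jailed temp dir')
    parser.add_argument('--consume', action='store_true', help='move (rather than copy) task files')
    args = parser.parse_args()
    stage0(args.task_path, args.result_path,
           temp_path=args.temp_path,
           consume_task_folder=args.consume)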
Example #58
0
    def get_or_create_table(self, project_id, dataset_id, table_id, schema,
                            create_disposition, write_disposition):
        """Gets or creates a table based on create and write dispositions.

    The function mimics the behavior of BigQuery import jobs when using the
    same create and write dispositions.

    Args:
      project_id: The project id owning the table.
      dataset_id: The dataset id owning the table.
      table_id: The table id.
      schema: A bigquery.TableSchema instance or None.
      create_disposition: CREATE_NEVER or CREATE_IF_NEEDED.
      write_disposition: WRITE_APPEND, WRITE_EMPTY or WRITE_TRUNCATE.

    Returns:
      A bigquery.Table instance if table was found or created.

    Raises:
      RuntimeError: For various mismatches between the state of the table and
        the create/write dispositions passed in. For example if the table is not
        empty and WRITE_EMPTY was specified then an error will be raised since
        the table was expected to be empty.
    """
        from apache_beam.io.gcp.bigquery import BigQueryDisposition

        found_table = None
        try:
            found_table = self.get_table(project_id, dataset_id, table_id)
        except HttpError as exn:
            if exn.status_code == 404:
                if create_disposition == BigQueryDisposition.CREATE_NEVER:
                    raise RuntimeError(
                        'Table %s:%s.%s not found but create disposition is CREATE_NEVER.'
                        % (project_id, dataset_id, table_id))
            else:
                raise

        # If table exists already then handle the semantics for WRITE_EMPTY and
        # WRITE_TRUNCATE write dispositions.
        if found_table:
            table_empty = self._is_table_empty(project_id, dataset_id,
                                               table_id)
            if (not table_empty
                    and write_disposition == BigQueryDisposition.WRITE_EMPTY):
                raise RuntimeError(
                    'Table %s:%s.%s is not empty but write disposition is WRITE_EMPTY.'
                    % (project_id, dataset_id, table_id))
            # Delete the table and recreate it (later) if WRITE_TRUNCATE was
            # specified.
            if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
                self._delete_table(project_id, dataset_id, table_id)

        # Create a new table potentially reusing the schema from a previously
        # found table in case the schema was not specified.
        if schema is None and found_table is None:
            raise RuntimeError(
                'Table %s:%s.%s requires a schema. None can be inferred because the '
                'table does not exist.' % (project_id, dataset_id, table_id))
        if found_table and write_disposition != BigQueryDisposition.WRITE_TRUNCATE:
            return found_table
        else:
            created_table = self._create_table(project_id=project_id,
                                               dataset_id=dataset_id,
                                               table_id=table_id,
                                               schema=schema
                                               or found_table.schema)
            logging.info('Created table %s.%s.%s with schema %s. Result: %s.',
                         project_id, dataset_id, table_id, schema
                         or found_table.schema, created_table)
            # if write_disposition == BigQueryDisposition.WRITE_TRUNCATE we delete
            # the table before this point.
            if write_disposition == BigQueryDisposition.WRITE_TRUNCATE:
                # BigQuery can route inserts to the old table for up to 2 minutes,
                # so wait that long before handing the newly created table back for writing.
                logging.warning(
                    'Sleeping for 150 seconds before the write as ' +
                    'BigQuery inserts can be routed to deleted table ' +
                    'for 2 mins after the delete and create.')
                # TODO(BEAM-2673): Remove this sleep by migrating to load api
                time.sleep(150)
            return created_table
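A hedged usage sketch, not part of the original example: it assumes wrapper is an instance of the class this method belongs to (Apache Beam's BigQueryWrapper) and that the schema classes come from Beam's generated BigQuery client; the project, dataset and table names are placeholders.

from apache_beam.io.gcp.bigquery import BigQueryDisposition
from apache_beam.io.gcp.internal.clients import bigquery

schema = bigquery.TableSchema(fields=[
    bigquery.TableFieldSchema(name='word', type='STRING', mode='REQUIRED'),
    bigquery.TableFieldSchema(name='count', type='INTEGER', mode='NULLABLE'),
])

# Create the table if it is missing and append to it if it already exists.
table = wrapper.get_or_create_table(
    'my-project', 'my_dataset', 'word_counts', schema,
    create_disposition=BigQueryDisposition.CREATE_IF_NEEDED,
    write_disposition=BigQueryDisposition.WRITE_APPEND)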
Example #59
0
    def close_spider(self, spider):
        logging.warning(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S') + '----' + 'Spider closed' + '\n')
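For context, this hook matches Scrapy's item-pipeline interface. A minimal sketch of a surrounding pipeline class (assumed, not from the original source) could look like this, with the pipeline enabled via ITEM_PIPELINES in settings.py:

import datetime
import logging

class LoggingPipeline:
    """Hypothetical pipeline that logs spider start and finish times."""

    def open_spider(self, spider):
        logging.warning(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S') + '----' + 'Spider started' + '\n')

    def process_item(self, item, spider):
        return item   # pass items through unchanged

    def close_spider(self, spider):
        logging.warning(datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S') + '----' + 'Spider closed' + '\n')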
Example #60
0
from data_retrieval.app_dataset import save_app_pt_df
from data_retrieval.manual_dataset import save_manual_pt_df
from data_retrieval.google_dataset import save_google_pt_df
import logging
import sys
import params

# create logger
FORMAT = '%(asctime)-15s %(message)s'
logging.basicConfig(format=FORMAT, level=logging.WARNING, stream=sys.stdout)

window_size = params.window_size
# 18 minutes needed to get app and manual pt_df
save_app_pt_df(window_size)
logging.warning("Finished processing app point data frame")
save_manual_pt_df(window_size)
logging.warning("Finished processing manual point data frame")
# 4 hours 30 minutes needed to get google pt_df
save_google_pt_df(window_size)
logging.warning("Finished processing google point data frame")