Example #1
    def __init__(self, uuid):
        self._uuid = copy(BLEUUID.BASE_UUID_BYTES)

        if isinstance(uuid, UUID):
            # Assume that the UUID is correct
            self._uuid = bytearray(uuid.bytes)
        elif isinstance(uuid, bytes):
            self._uuid[2:4] = bytearray(bytes_to_native_str(uuid))
        elif isinstance(uuid, str):
            if len(uuid) == 4:
                # 16-bit UUID
                part = int(uuid, 16).to_bytes(2, 'little')
                self._uuid[2:4] = bytearray(part)
            elif len(uuid) == 8:
                # 32-bit UUID
                part = int(uuid, 16).to_bytes(4, 'little')
                self._uuid[0:4] = bytearray(part)
            elif len(uuid) == 36:
                # 128-bit UUID
                self._uuid = bytearray(UUID(uuid).bytes)
            else:
                raise ValueError("Invalid UUID")
        elif isinstance(uuid, int):
            if uuid < 65536:
                # 16-bit UUID
                part = int(uuid).to_bytes(2, 'little')
                self._uuid[2:4] = bytearray(part)
            elif uuid < 2**32:
                # 32-bit UUID
                part = int(uuid).to_bytes(4, 'little')
                self._uuid[0:4] = bytearray(part)
            else:
                raise ValueError("Invalid UUID")
        else:
            raise ValueError("Invalid UUID (type error)")
Example #2
    def __init__(self):
        self.quads_index = []

        with open(self.names_file, 'r') as fh:
            self.names = [name.strip() for name in fh]

        with open(self.quadsindex_file, 'r') as fh:
            indexes = []
            for index in fh:
                indexes += [n.strip() for n in index.split(' ') if n != '']

        self.lons_per_lat = dict(list(zip(
            self.quads_order,
            [indexes[x:x + 91] for x in range(0, len(indexes), 91)]
        )))

        self.lat_begins = {}

        for quad, index in list(self.lons_per_lat.items()):
            begin = 0
            end = -1
            begins = []
            n = 0

            for item in index:
                n += 1
                begin = end + 1
                begins.append(begin)
                end += int(item)

            self.lat_begins[quad] = begins

        self.lons = {}
        self.fenums = {}
        for quad, sect_file in zip(self.quads_order, self.sect_files):
            sect = []
            with open(sect_file, 'r') as fh:
                for line in fh:
                    sect += [int(v) for v in line.strip().split(' ')
                             if v != '']

            lons = []
            fenums = []
            n = 0
            for item in sect:
                n += 1
                if n % 2:
                    lons.append(item)
                else:
                    fenums.append(item)

            self.lons[quad] = lons
            self.fenums[quad] = fenums

        with open(self.numbers_file, 'rt') as csvfile:
            FE_csv = csv.reader(csvfile, delimiter=native_str(';'),
                                quotechar=native_str('#'),
                                skipinitialspace=True)
            self.by_number = \
                dict((int(row[0]), row[1]) for row in FE_csv if len(row) > 1)
Example #3
    def convert_result(self, value):
        if self.input.type == InputType.Click:
            parts = value.split(',')
            return int(parts[0]), int(parts[1])

        # TODO: convert based on input/output
        return value
Example #4
    def handle_noargs(self, **options):
        verbosity = int(options.get("verbosity", 0))
        interactive = int(options.get("interactive", 0))
        no_data = int(options.get("nodata", 0))
        if "conf_setting" in connection.introspection.table_names():
            raise CommandError("Database already created, you probably "
                               "want the syncdb or migrate command")

        syncdb.Command().execute(**options)
        if not interactive and not no_data:
            install_optional_data(verbosity)
        if "south" in settings.INSTALLED_APPS:
            try:
                from south.management.commands import migrate
            except ImportError:
                return
            if interactive:
                confirm = input("\nSouth is installed for this project."
                                    "\nWould you like to fake initial "
                                    "migrations? (yes/no): ")
                while True:
                    if confirm == "yes":
                        break
                    elif confirm == "no":
                        return
                    confirm = input("Please enter either 'yes' or 'no': ")
            if verbosity >= 1:
                print()
                print("Faking initial migrations ...")
                print()
            migrate.Command().execute(fake=True)
Example #5
    def get_package_stats(self, pid=None, root=None, owner=None):
        qry = (
            'SELECT p.pid, SUM(f.size) AS sizetotal, COUNT(f.fid) '
            'AS linkstotal, sizedone, linksdone FROM packages p JOIN files f '
            'ON p.pid = f.package AND f.dlstatus > 0 {0} LEFT OUTER '
            'JOIN (SELECT p.pid AS pid, SUM(f.size) AS sizedone, COUNT(f.fid) '
            'AS linksdone FROM packages p JOIN files f '
            'ON p.pid = f.package {0} AND f.dlstatus in (5,6) '
            'GROUP BY p.pid) s ON s.pid = p.pid GROUP BY p.pid')

        # status in (finished, skipped, processing)

        if root is not None:
            self.c.execute(qry.format(
                'AND (p.root=:root OR p.pid=:root)'), locals())
        elif pid is not None:
            self.c.execute(qry.format('AND p.pid=:pid'), locals())
        elif owner is not None:
            self.c.execute(qry.format('AND p.owner=:owner'), locals())
        else:
            self.c.execute(qry.format(''))

        data = {}
        for r in self.c.fetchall():
            data[r[0]] = PackageStats(
                r[2] if r[2] else 0,
                r[4] if r[4] else 0,
                int(r[1]) if r[1] else 0,
                int(r[3]) if r[3] else 0,
            )

        return data
Example #6
def _get_terminal_size_linux():
    def ioctl_GWINSZ(fd):
        try:
            import fcntl
            import termios
            cr = struct.unpack('hh',
                               fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
            return cr
        except Exception as e:
            log.error(e)
            pass

    cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2)
    if not cr:
        try:
            fd = os.open(os.ctermid(), os.O_RDONLY)
            cr = ioctl_GWINSZ(fd)
            os.close(fd)
        except Exception as e:
            log.error(e)
            pass
    if not cr:
        try:
            cr = (os.environ['LINES'], os.environ['COLUMNS'])
        except Exception as e:
            log.error(e)
            return None
    return int(cr[1]), int(cr[0])
Example #7
def factorize_int(x):
    """
    Calculate prime factorization of integer.

    Could be done faster, but a faster algorithm would take many more lines of
    code, and this is fast enough for our purposes.

    http://stackoverflow.com/questions/14550794/\
    python-integer-factorization-into-primes

    >>> factorize_int(1800004)
    [2, 2, 450001]
    >>> factorize_int(1800003)
    [3, 19, 23, 1373]
    """
    if x == 1:
        return [1]
    factors, limit, check, num = [], int(math.sqrt(x)) + 1, 2, x
    for check in range(2, limit):
        while num % check == 0:
            factors.append(check)
            num //= check  # floor division keeps num an int in Python 3
    if num > 1:
        factors.append(int(num))
    return factors
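A quick sanity check of the helper above (assuming it is defined alongside its `import math`): the first assertion repeats the doctest, the second checks that the factors multiply back to the input.

import math

assert factorize_int(1800004) == [2, 2, 450001]
assert math.prod(factorize_int(360)) == 360  # 2*2*2*3*3*5; math.prod needs Python 3.8+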
Example #8
    def __init__(
        self, manager, fid, name, size, filestatus, media, added,
            fileorder, url, pluginname, hash, status, error, package, owner):
        super(File, self).__init__()

        self.manager = manager
        self.pyload = manager.pyload

        self.fid = int(fid)
        self._name = purge.name(name)
        self._size = int(size)
        self.filestatus = filestatus
        self.media = media
        self.added = added
        self.fileorder = fileorder
        self.url = url
        self.pluginname = pluginname
        self.hash = hash
        self.status = status
        self.error = error
        self.owner = owner
        self.packageid = package
        # database information ends here

        self.lock = RWLock()

        self.plugin = None

        self.wait_until = 0  # time.time() + time to wait

        # status attributes
        self.abort = False
        self.reconnected = False
        self.statusname = None
Example #9
    def handle(self, **options):

        if "conf_setting" in connection.introspection.table_names():
            raise CommandError("Database already created, you probably "
                               "want the migrate command")

        self.verbosity = int(options.get("verbosity", 0))
        self.interactive = int(options.get("interactive", 0))
        self.no_data = int(options.get("nodata", 0))

        call_command("migrate", verbosity=self.verbosity,
                     interactive=self.interactive)

        mapping = [
            [self.create_site, ["django.contrib.sites"]],
            [self.create_user, ["django.contrib.auth"]],
            [self.translation_fields, ["modeltranslation"]],
            [self.create_pages, ["mezzanine.pages", "mezzanine.forms",
                                 "mezzanine.blog", "mezzanine.galleries"]],
            [self.create_shop, ["cartridge.shop"]],
        ]

        for func, apps in mapping:
            if set(apps).issubset(set(settings.INSTALLED_APPS)):
                func()
Example #10
  def ParseFromHumanReadable(self, string):
    """Parse a human readable string of a byte string.

    Args:
      string: The string to parse.

    Raises:
      DecodeError: If the string can not be parsed.
    """
    if not string:
      return None

    match = self.REGEX.match(string.strip().lower())
    if not match:
      raise DecodeError("Unknown specification for ByteSize %s" % string)

    multiplier = self.DIVIDERS.get(match.group(2))
    if not multiplier:
      raise DecodeError("Invalid multiplier %s" % match.group(2))

    # The value may be represented as a float, but if not, don't lose accuracy.
    value = match.group(1)
    if "." in value:
      value = float(value)
    else:
      value = int(value)

    self._value = int(value * multiplier)
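`self.REGEX` and `self.DIVIDERS` are not shown above, so the pattern and multiplier table in this standalone sketch are assumptions that only illustrate the same parse-then-multiply idea.

import re

REGEX = re.compile(r"^([0-9.]+) ?([kmgt]i?b?|b?)$")
DIVIDERS = {"": 1, "b": 1, "kb": 1000, "kib": 1024, "mb": 1000**2, "mib": 1024**2,
            "gb": 1000**3, "gib": 1024**3, "tb": 1000**4, "tib": 1024**4}

def parse_byte_size(text):
    match = REGEX.match(text.strip().lower())
    if not match:
        raise ValueError("Unknown specification for ByteSize %s" % text)
    value = match.group(1)
    value = float(value) if "." in value else int(value)
    return int(value * DIVIDERS[match.group(2)])

print(parse_byte_size("1.5 GiB"))  # 1610612736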
Example #11
 def getBox(self, angle, origin, final_shape):
     '''
     Given an angle, origin, and final image shape, this returns a single 2D
     image of a shape placed at the 'origin' and rotated to be facing 'angle'.
     '''
     image = np.zeros(final_shape)
     r = self.radius
     x0 = int(np.round(origin[0] - r))
     y0 = int(np.round(origin[1] - r))
     bbox = [x0, y0, x0 + int(2 * r), y0 + int(2 * r)]
     box = rotate(self.box, angle)
     # This tells us how many pixels out of bounds our box is, so we can crop it.
     outOfBounds = [0, 0, 0, 0]
     if bbox[0] < 0:
         outOfBounds[0] = 0 - bbox[0]
         bbox[0] = 0
     if bbox[1] < 0:
         outOfBounds[1] = 0 - bbox[1]
         bbox[1] = 0
     if bbox[2] > final_shape[0]:
         outOfBounds[2] = bbox[2] - final_shape[0]
         bbox[2] = final_shape[0]
     if bbox[3] > final_shape[1]:
         outOfBounds[3] = bbox[3] - final_shape[1]
         bbox[3] = final_shape[1]
     box = box[outOfBounds[0]:box.shape[0] - outOfBounds[2],
               outOfBounds[1]:box.shape[1] - outOfBounds[3]]
     image[bbox[0]:bbox[2], bbox[1]:bbox[3]] = box
     return image
Example #12
 def FormatAsHexStringTest(self):
   self.assertEqual(utils.FormatAsHexString(10), "0xa")
   self.assertEqual(utils.FormatAsHexString(10, 4), "0x000a")
   self.assertEqual(utils.FormatAsHexString(10, 16), "0x000000000000000a")
   # No trailing "L".
   self.assertEqual(utils.FormatAsHexString(int(1e19)), "0x8ac7230489e80000")
   self.assertEqual(
       utils.FormatAsHexString(int(1e19), 5), "0x8ac7230489e80000")
Example #13
 def shrinkImage(self):
     img = self.ImageView.getProcessedImage()
     r = 1.5
     dim = (int(img.shape[1] / r), int(img.shape[0] / r)) 
     # perform the actual resizing of the image and show it
     img = cv2.resize(img, dim, interpolation = cv2.INTER_AREA)
     self.ImageView.setImage(img)        
     return
Example #14
File: util.py Project: kaeufl/obspy
def _convertDatetimeToMSTime(dt):
    """
    Takes an obspy.util.UTCDateTime object and returns an epoch time in ms.

    :param dt: obspy.util.UTCDateTime object.
    """
    _fsec, _sec = math.modf(dt.timestamp)
    return int(round(_fsec * HPTMODULUS)) + int(_sec * HPTMODULUS)
Example #15
    def flinnengdahl(self, lat, lon, rtype="both"):
        """
        Low-level interface for `flinnengdahl` Web service of IRIS
        (http://service.iris.edu/irisws/flinnengdahl/) - release 1.1
        (2011-06-08).

        This method converts a latitude, longitude pair into either a
        `Flinn-Engdahl <http://en.wikipedia.org/wiki/Flinn-Engdahl_regions>`_
        seismic region code or region name.

        :type lat: float
        :param lat: Latitude of interest.
        :type lon: float
        :param lon: Longitude of interest.
        :type rtype: ``'code'``, ``'region'`` or ``'both'``
        :param rtype: Return type. Defaults to ``'both'``.
        :rtype: int, str, or tuple
        :returns: Returns Flinn-Engdahl region code or name or both, depending
            on the request type parameter ``rtype``.

        .. rubric:: Examples

        >>> from obspy.iris import Client
        >>> client = Client()
        >>> client.flinnengdahl(lat=-20.5, lon=-100.6, rtype="code")
        683

        >>> print(client.flinnengdahl(lat=42, lon=-122.24, rtype="region"))
        OREGON

        >>> code, region = client.flinnengdahl(lat=-20.5, lon=-100.6)
        >>> print(code, region)
        683 SOUTHEAST CENTRAL PACIFIC OCEAN
        """
        service = 'flinnengdahl'
        # check rtype
        try:
            if rtype == 'code':
                param_list = ["output=%s" % rtype, "lat=%s" % lat,
                              "lon=%s" % lon]
                return int(self._fetch(service, param_list=param_list))
            elif rtype == 'region':
                param_list = ["output=%s" % rtype, "lat=%s" % lat,
                              "lon=%s" % lon]
                return self._fetch(service,
                                   param_list=param_list).strip().decode()
            else:
                param_list = ["output=code", "lat=%s" % lat,
                              "lon=%s" % lon]
                code = int(self._fetch(service, param_list=param_list))
                param_list = ["output=region", "lat=%s" % lat,
                              "lon=%s" % lon]
                region = self._fetch(service, param_list=param_list).strip()
                return (code, region.decode())
        except compatibility.HTTPError as e:
            msg = "No Flinn-Engdahl data available (%s: %s)"
            msg = msg % (e.__class__.__name__, e)
            raise Exception(msg)
Example #16
        def pbkdf2_hmac(hash_name, password, salt, iterations, dklen=None):
            """Password based key derivation function 2 (PKCS #5 v2.0)

            This Python implementation, based on the hmac module, is about
            as fast as OpenSSL's PKCS5_PBKDF2_HMAC for short passwords
            and much faster for long passwords.

            """
            if not isinstance(hash_name, str):
                raise TypeError(hash_name)

            if not isinstance(password, (bytes, bytearray)):
                password = bytes(buffer(password))
            if not isinstance(salt, (bytes, bytearray)):
                salt = bytes(buffer(salt))

            # Fast inline HMAC implementation
            inner = new(hash_name)
            outer = new(hash_name)
            blocksize = getattr(inner, 'block_size', 64)
            if len(password) > blocksize:
                password = new(hash_name, password).digest()
            password = password + b'\x00' * (blocksize - len(password))
            inner.update(password.translate(_trans_36))
            outer.update(password.translate(_trans_5C))

            def prf(msg, inner=inner, outer=outer):
                # PBKDF2_HMAC uses the password as key. We can re-use the same
                # digest objects and just update copies to skip initialization.
                icpy = inner.copy()
                ocpy = outer.copy()
                icpy.update(msg)
                ocpy.update(icpy.digest())
                return ocpy.digest()

            if iterations < 1:
                raise ValueError(iterations)
            if dklen is None:
                dklen = outer.digest_size
            if dklen < 1:
                raise ValueError(dklen)

            hex_format_string = '%%0%ix' % (new(hash_name).digest_size * 2)

            dkey = b''
            loop = 1
            while len(dkey) < dklen:
                prev = prf(salt + struct.pack(b'>I', loop))
                rkey = int(binascii.hexlify(prev), 16)
                for _i in range(iterations - 1):
                    prev = prf(prev)
                    rkey ^= int(binascii.hexlify(prev), 16)
                loop += 1
                dkey += binascii.unhexlify(hex_format_string % rkey)

            return dkey[:dklen]
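On Python 3 the standard library already ships this primitive, so a quick way to sanity-check the fallback above (which also needs the module-level `new()`, `struct`, `binascii` and the `_trans_36`/`_trans_5C` tables it references) is to compare it against hashlib.

import hashlib

dk = hashlib.pbkdf2_hmac('sha256', b'password', b'salt', 100000, dklen=32)
print(dk.hex())  # the same digest the pure-Python fallback should produce for these inputs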
Example #17
def _get_terminal_size_tput():
    # get terminal width
    # src: http://stackoverflow.com/questions/263890/how-do-i-find-the-width-height-of-a-terminal-window
    try:
        # check_output captures stdout; check_call would only return the exit code
        cols = int(subprocess.check_output(shlex.split('tput cols')))
        rows = int(subprocess.check_output(shlex.split('tput lines')))
        return (cols, rows)
    except Exception as e:
        log.error(e)
        pass
Example #18
def timeparse(sval, granularity='seconds'):
    '''
    Parse a time expression, returning it as a number of seconds.  If
    possible, the return value will be an `int`; if this is not
    possible, the return will be a `float`.  Returns `None` if a time
    expression cannot be parsed from the given string.

    Arguments:
    - `sval`: the string value to parse

    >>> timeparse('1:24')
    84
    >>> timeparse(':22')
    22
    >>> timeparse('1 minute, 24 secs')
    84
    >>> timeparse('1m24s')
    84
    >>> timeparse('1.2 minutes')
    72
    >>> timeparse('1.2 seconds')
    1.2
    
    If granularity is specified as ``minutes``, then ambiguous digits following
    a colon will be interpreted as minutes; otherwise they are considered seconds.
    
    >>> timeparse('1:30')
    90
    >>> timeparse('1:30', granularity='minutes')
    5400
    '''
    for timefmt in TIMEFORMATS:
        match = re.match(r'\s*' + timefmt + r'\s*$', sval, re.I)
        if match and match.group(0).strip():
            mdict = match.groupdict()
            if granularity == 'minutes':
                mdict = _interpret_as_minutes(sval, mdict)
            # if all of the fields are integer numbers
            if all(v.isdigit() for v in list(mdict.values()) if v):
                return sum([MULTIPLIERS[k] * int(v, 10) for (k, v) in
                            list(mdict.items()) if v is not None])
            # if SECS is an integer number
            elif ('secs' not in mdict or
                  mdict['secs'] is None or
                  mdict['secs'].isdigit()):
                # we will return an integer
                return (
                    int(sum([MULTIPLIERS[k] * float(v) for (k, v) in
                             list(mdict.items()) if k != 'secs' and v is not None])) +
                    (int(mdict['secs'], 10) if mdict['secs'] else 0))
            else:
                # SECS is a float, we will return a float
                return sum([MULTIPLIERS[k] * float(v) for (k, v) in
                            list(mdict.items()) if v is not None])
Example #19
 def convert(self, value):
     try:
         if isinstance(value, list):
             return [int(_i) for _i in value]
         else:
             return int(value)
     except (TypeError, ValueError):
         if not self.strict:
             return self.default_value
         msg = "No integer value found for %s." % self.attribute_name
         raise SEEDTypeException(msg)
Example #20
 def get_download_limit(self):
     if self.account:
         limit = self.account.options.get('limitDL', 0)
         if limit == '':
             limit = 0
         if self.limit_dl > 0:  # a limit is already set, we use the minimum
             return min(int(limit), self.limit_dl)
         else:
             return int(limit)
     else:
         return self.limit_dl
Example #21
def array_transff_freqslowness(coords, slim, sstep, fmin, fmax, fstep, coordsys="lonlat"):
    """
    Returns array transfer function as a function of slowness difference and
    frequency.

    :type coords: numpy.ndarray
    :param coords: coordinates of stations in longitude and latitude in degrees
        elevation in km, or x, y, z in km
    :type coordsys: string
    :param coordsys: valid values: 'lonlat' and 'xy', choose which coordinates
        to use
    :param slim: either a float to use symmetric limits for slowness
        differences or the tuple (sxmin, sxmax, symin, symax)
    :type fmin: double
    :param fmin: minimum frequency in signal
    :type fmax: double
    :param fmax: maximum frequency in signal
    :type fstep: double
    :param fstep: frequency sample distance
    """
    coords = get_geometry(coords, coordsys)
    if isinstance(slim, float):
        sxmin = -slim
        sxmax = slim
        symin = -slim
        symax = slim
    elif isinstance(slim, tuple) and len(slim) == 4:
        sxmin = slim[0]
        sxmax = slim[1]
        symin = slim[2]
        symax = slim[3]
    else:
        raise TypeError("slim must either be a float or a tuple of length 4")

    nsx = int(np.ceil((sxmax + sstep / 10.0 - sxmin) / sstep))
    nsy = int(np.ceil((symax + sstep / 10.0 - symin) / sstep))
    nf = int(np.ceil((fmax + fstep / 10.0 - fmin) / fstep))

    transff = np.empty((nsx, nsy))
    buff = np.zeros(nf)

    for i, sx in enumerate(np.arange(sxmin, sxmax + sstep / 10.0, sstep)):
        for j, sy in enumerate(np.arange(symin, symax + sstep / 10.0, sstep)):
            for k, f in enumerate(np.arange(fmin, fmax + fstep / 10.0, fstep)):
                _sum = 0j
                for l in np.arange(len(coords)):
                    _sum += np.exp(complex(0.0, (coords[l, 0] * sx + coords[l, 1] * sy) * 2 * np.pi * f))
                buff[k] = abs(_sum) ** 2
            transff[i, j] = cumtrapz(buff, dx=fstep)[-1]

    transff /= transff.max()
    return transff
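The innermost loop above evaluates the classic array response |sum_l exp(i*2*pi*f*(x_l*sx + y_l*sy))|**2. A standalone, vectorised sketch of that single evaluation (the station offsets, slowness and frequency below are made up):

import numpy as np

coords = np.array([[0.0, 0.0], [1.0, 0.5], [-0.5, 2.0]])  # hypothetical x, y offsets in km
sx, sy, f = 0.1, -0.05, 2.0                                # assumed slowness (s/km) and frequency (Hz)
phase = 2 * np.pi * f * (coords[:, 0] * sx + coords[:, 1] * sy)
power = abs(np.exp(1j * phase).sum()) ** 2                 # |sum_l exp(i*phi_l)|^2, as in the inner loop
print(power)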
Example #22
 def parseSEED(self, data, expected_length=0):
     """
     If number of FIR coefficients are larger than maximal blockette size of
     9999 chars a follow up blockette with the same blockette id and
     response lookup key is expected - this is checked here.
     """
     # convert to stream for test issues
     if isinstance(data, bytes):
         expected_length = len(data)
         data = compatibility.BytesIO(data)
     elif isinstance(data, (str, native_str)):
         raise TypeError("Data must be bytes, not string")
     # get current lookup key
     pos = data.tell()
     data.read(7)
     global_lookup_key = int(data.read(4))
     data.seek(pos)
     # read first blockette
     temp = compatibility.BytesIO()
     temp.write(data.read(expected_length))
     # check next blockettes
     while True:
         # save position
         pos = data.tell()
         try:
             blockette_id = int(data.read(3))
         except ValueError:
             break
         if blockette_id != 41:
             # different blockette id -> break
             break
         blockette_length = int(data.read(4))
         lookup_key = int(data.read(4))
         if lookup_key != global_lookup_key:
             # different lookup key -> break
             break
         # ok follow up blockette found - skip some unneeded fields
         self.fields[1].read(data)
         self.fields[2].read(data)
         self.fields[3].read(data)
         self.fields[4].read(data)
         self.fields[5].read(data)
         # remaining length in current blockette
         length = pos - data.tell() + blockette_length
         # read follow up blockette and append it to temporary blockette
         temp.write(data.read(length))
     # reposition file pointer
     data.seek(pos)
     # parse new combined temporary blockette
     temp.seek(0, os.SEEK_END)
     _len = temp.tell()
     temp.seek(0)
     Blockette.parseSEED(self, temp, expected_length=_len)
Example #23
File: util.py Project: kaeufl/obspy
def nearestPow2(x):
    """
    Finds the nearest integer that is a power of 2.
    In contrast to :func:`nextpow2` also searches for numbers smaller than the
    input and returns them if they are closer than the next bigger power of 2.
    """
    a = M.pow(2, M.ceil(M.log(x, 2)))
    b = M.pow(2, M.floor(M.log(x, 2)))
    if abs(a - x) < abs(b - x):
        return int(a)
    else:
        return int(b)
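Two quick illustrations of the behaviour described in the docstring (assuming the function is defined together with its module-level `import math as M`):

import math as M  # noqa: F401 -- nearestPow2() above expects this module-level alias

assert nearestPow2(20) == 16  # 20 is closer to 16 than to 32
assert nearestPow2(25) == 32  # 25 is closer to 32 than to 16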
Example #24
    def api_node_moved_view(self, request):
        """
        Update the position of a node, from a API request.
        """
        try:
            moved_id = int(request.POST['moved_id'])
            target_id = int(request.POST['target_id'])
            position = request.POST['position']
            previous_parent_id = int(request.POST['previous_parent_id']) or None

            # Not using .non_polymorphic() so all models are downcasted to the derived model.
            # This causes the signal below to be emitted from the proper class as well.
            moved = self.model.objects.get(pk=moved_id)
            target = self.model.objects.get(pk=target_id)
        except (ValueError, KeyError) as e:
            return HttpResponseBadRequest(json.dumps({'action': 'foundbug', 'error': str(e)}), content_type='application/json')
        except self.model.DoesNotExist as e:
            return HttpResponseNotFound(json.dumps({'action': 'reload', 'error': str(e)}), content_type='application/json')

        if not self.can_have_children(target) and position == 'inside':
            return HttpResponse(json.dumps({
                'action': 'reject',
                'moved_id': moved_id,
                'error': _(u'Cannot place \u2018{0}\u2019 below \u2018{1}\u2019; a {2} does not allow children!').format(moved, target, target._meta.verbose_name)
            }), content_type='application/json', status=409)  # Conflict
        if moved.parent_id != previous_parent_id:
            return HttpResponse(json.dumps({
                'action': 'reload',
                'error': 'Client seems to be out-of-sync, please reload!'
            }), content_type='application/json', status=409)

        # TODO: with granular user permissions, check if user is allowed to edit both pages.

        mptt_position = {
            'inside': 'first-child',
            'before': 'left',
            'after': 'right',
        }[position]
        moved.move_to(target, mptt_position)

        # Some packages depend on calling .save() or post_save signal after updating a model.
        # This is required by django-fluent-pages for example to update the URL caches.
        moved.save()

        # Report back to client.
        return HttpResponse(json.dumps({
            'action': 'success',
            'error': None,
            'moved_id': moved_id,
            'action_column': self.actions_column(moved),
        }), content_type='application/json')
Example #25
def get_filterdate(filterDate, dateTime):
    """
    Get filterdate.
    """

    returnvalue = ""
    dateYear = strftime("%Y", gmtime(dateTime))
    dateMonth = strftime("%m", gmtime(dateTime))
    dateDay = strftime("%d", gmtime(dateTime))
    if (
        filterDate == "today"
        and int(dateYear) == int(localtime()[0])
        and int(dateMonth) == int(localtime()[1])
        and int(dateDay) == int(localtime()[2])
    ):
        returnvalue = "true"
    elif filterDate == "thismonth" and dateTime >= time() - 2592000:
        returnvalue = "true"
    elif filterDate == "thisyear" and int(dateYear) == int(localtime()[0]):
        returnvalue = "true"
    elif filterDate == "past7days" and dateTime >= time() - 604800:
        returnvalue = "true"
    elif filterDate == "":
        returnvalue = "true"
    return returnvalue
Example #26
 def clean_card_expiry_year(self):
     """
     Ensure the card expiry doesn't occur in the past.
     """
     try:
         month = int(self.cleaned_data["card_expiry_month"])
         year = int(self.cleaned_data["card_expiry_year"])
     except ValueError:
         # Haven't reached payment step yet.
         return
     n = now()
     if year == n.year and month < n.month:
         raise forms.ValidationError(_("A valid expiry date is required."))
     return str(year)
Example #27
File: core.py Project: kaeufl/obspy
def toUTCDateTime(value):
    """
    Converts time string used within Seismic Handler into a UTCDateTime.

    :type value: str
    :param value: A Date time string.
    :return: Converted :class:`~obspy.core.UTCDateTime` object.

    .. rubric:: Example

    >>> toUTCDateTime(' 2-JAN-2008_03:04:05.123')
    UTCDateTime(2008, 1, 2, 3, 4, 5, 123000)
    >>> toUTCDateTime('2-JAN-2008')
    UTCDateTime(2008, 1, 2, 0, 0)
    >>> toUTCDateTime('2-JAN-08')
    UTCDateTime(2008, 1, 2, 0, 0)
    >>> toUTCDateTime('2-JAN-99')
    UTCDateTime(1999, 1, 2, 0, 0)
    >>> toUTCDateTime('2-JAN-2008_1')
    UTCDateTime(2008, 1, 2, 1, 0)
    >>> toUTCDateTime('2-JAN-2008_1:1')
    UTCDateTime(2008, 1, 2, 1, 1)
    """
    try:
        date, time = value.split('_')
    except ValueError:
        date = value
        time = "0:0:0"
    day, month, year = date.split('-')
    time = time.split(':')
    try:
        hour, mins, secs = time
    except ValueError:
        hour = time[0]
        mins = "0"
        secs = "0"
        if len(time) == 2:
            mins = time[1]
    day = int(day)
    month = MONTHS.index(month.upper()) + 1
    if len(year) == 2:
        if int(year) < 70:
            year = "20" + year
        else:
            year = "19" + year
    year = int(year)
    hour = int(hour)
    mins = int(mins)
    secs = float(secs)
    return UTCDateTime(year, month, day, hour, mins) + secs
Example #28
File: util.py Project: kaeufl/obspy
def smooth(x, smoothie):
    """
    Smoothes a given signal by computing a central moving average.

    :param x: signal to smooth
    :param smoothie: number of past/future values to calculate moving average
    :return out: smoothed signal
    """
    size_x = np.size(x)
    if smoothie > 0:
        if (len(x) > 1 and len(x) < size_x):
            #out_add = append(append([x[0,:]]*smoothie,x,axis=0),
            #                     [x[(len(x)-1),:]]*smoothie,axis=0)
            #out_add = (np.append([x[0, :]]*int(smoothie), x, axis=0))
            out_add = np.vstack(([x[0, :]] * int(smoothie), x,
                                 [x[(len(x) - 1), :]] * int(smoothie)))
            help = np.transpose(out_add)
            #out = signal.lfilter(np.ones(smoothie) / smoothie, 1, help)
            out = signal.lfilter(
                np.hstack((np.ones(smoothie) / (2 * smoothie), 0,
                          np.ones(smoothie) / (2 * smoothie))), 1, help)
            out = np.transpose(out)
            #out = out[smoothie:len(out), :]
            out = out[2 * smoothie:len(out), :]
            #out = filter(ones(1,smoothie)/smoothie,1,out_add)
            #out[1:smoothie,:] = []
        else:
            #out_add = np.append(np.append([x[0]] * smoothie, x),
            #                   [x[size_x - 1]] * smoothie)
            out_add = np.hstack(([x[0]] * int(smoothie), x,
                                 [x[(len(x) - 1)]] * int(smoothie)))
            out = signal.lfilter(np.hstack((
                np.ones(smoothie) / (2 * smoothie), 0,
                np.ones(smoothie) / (2 * smoothie))), 1, out_add)
            out = out[2 * smoothie:len(out)]
            out[0:smoothie] = out[smoothie]
            out[len(out) - smoothie:len(out)] = out[len(out) - smoothie - 1]
            #for i in xrange(smoothie, len(x) + smoothie):
            #    sum = 0
            #    for k in xrange(-smoothie, smoothie):
            #        sum = sum + out_add[i + k]
            #        suma[i - smoothie] = float(sum) / (2 * smoothie)
            #        out = suma
            #        out[0:smoothie] = out[smoothie]
            #        out[size_x - 1 - smoothie:size_x] = \
            #            out[size_x - 1 - smoothie]
    else:
        out = x
    return out
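A minimal usage sketch for the 1-D branch (assuming the module's numpy and scipy.signal imports, which the function above requires):

import numpy as np
from scipy import signal  # noqa: F401 -- smooth() above expects this module-level name

x = np.arange(10, dtype=float)
y = smooth(x, 2)       # average over a 2-sample half-window on each side
print(len(x), len(y))  # the smoothed signal keeps the original length: 10 10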
Example #29
def logbankm(p, n, fs, w):
    """
    Matrix for a log-spaced filterbank.

    Computes a matrix containing the filterbank amplitudes for a log-spaced
    filterbank.

    :param p: Number of filters in filterbank.
    :param n: Length of fft.
    :param fs: Sampling frequency in Hz.
    :param w: Window function.
    :return: **xx, yy, zz** - Matrix containing the filterbank amplitudes,
        Lowest fft bin with a non-zero coefficient, Highest fft bin with a
        non-zero coefficient.
    """
    # alternative to avoid above problems: low end of the lowest filter
    # corresponds to maximum frequency resolution
    fn2 = np.floor(n / 2)
    fl = np.floor(fs) / np.floor(n)
    fh = np.floor(fs / 2)
    lr = np.log((fh) / (fl)) / (p + 1)
    bl = n * ((fl) *
              np.exp(np.array([0, 1, p, p + 1]) * float(lr)) / float(fs))
    b2 = int(np.ceil(bl[1]))
    b3 = int(np.floor(bl[2]))
    b1 = int(np.floor(bl[0])) + 1
    b4 = int(min(fn2, np.ceil(bl[3]))) - 1
    pf = np.log(((np.arange(b1 - 1, b4 + 1, dtype='f8') / n) * fs) / (fl)) / lr
    fp = np.floor(pf)
    pm = pf - fp
    k2 = b2 - b1 + 1
    k3 = b3 - b1 + 1
    k4 = b4 - b1 + 1
    r = np.append(fp[k2:k4 + 2], 1 + fp[1:k3 + 1]) - 1
    c = np.append(np.arange(k2, k4 + 1), np.arange(1, k3 + 1)) - 1
    v = 2 * np.append([1 - pm[k2:k4 + 1]], [pm[1:k3 + 1]])
    mn = b1 + 1
    mx = b4 + 1
    #x = np.array([[c],[r]], dtype=[('x', 'float'), ('y', 'float')])
    #ind=np.argsort(x, order=('x','y'))
    if (w == 'Hann'):
        v = 1. - [np.cos([v * float(np.pi / 2.)])]
    elif (w == 'Hamming'):
        v = 1. - 0.92 / 1.08 * np.cos(v * float(np.pi / 2))
    # bugfix for #70 - scipy.sparse.csr_matrix() delivers sometimes a
    # transposed matrix depending on the installed NumPy version - using
    # scipy.sparse.coo_matrix() ensures compatibility with old NumPy versions
    xx = sparse.coo_matrix((v, (c, r))).transpose().todense()
    return xx, mn - 1, mx - 1
Example #30
def scoreatpercentile(values, per, limit=(), issorted=True):
    """
    Calculates the score at the given per percentile of the sequence a.

    For example, the score at ``per=50`` is the median.

    If the desired quantile lies between two data points, we interpolate
    between them.

    If the parameter ``limit`` is provided, it should be a tuple (lower,
    upper) of two values.  Values of ``a`` outside this (closed) interval
    will be ignored.

    .. rubric:: Examples

    >>> a = [1, 2, 3, 4]
    >>> scoreatpercentile(a, 25)
    1.75
    >>> scoreatpercentile(a, 50)
    2.5
    >>> scoreatpercentile(a, 75)
    3.25

    >>> a = [6, 47, 49, 15, 42, 41, 7, 255, 39, 43, 40, 36, 500]
    >>> scoreatpercentile(a, 25, limit=(0, 100))
    25.5
    >>> scoreatpercentile(a, 50, limit=(0, 100))
    40
    >>> scoreatpercentile(a, 75, limit=(0, 100))
    42.5

    This function is taken from :func:`scipy.stats.scoreatpercentile`.

    Copyright (c) Gary Strangman
    """
    if limit:
        values = [v for v in values if limit[0] < v < limit[1]]

    if issorted:
        values = sorted(values)

    def _interpolate(a, b, fraction):
        return a + (b - a) * fraction

    idx = per / 100. * (len(values) - 1)
    if (idx % 1 == 0):
        return values[int(idx)]
    else:
        return _interpolate(values[int(idx)], values[int(idx) + 1], idx % 1)
Example #31
def availspace(path):
    if os.name != 'nt':
        stat = os.statvfs(path)
        res = stat.f_frsize * stat.f_bavail
    else:
        import ctypes
        free_bytes = ctypes.c_ulonglong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path),
                                                   None, None,
                                                   ctypes.pointer(free_bytes))
        res = int(free_bytes.value)
    return res
Example #32
def dashboard_column(context, token):
    """
    Takes an index for retrieving the sequence of template tags from
    ``mezzanine.conf.DASHBOARD_TAGS`` to render into the admin
    dashboard.
    """
    column_index = int(token.split_contents()[1])
    output = []
    for tag in settings.DASHBOARD_TAGS[column_index]:
        t = Template("{%% load %s %%}{%% %s %%}" % tuple(tag.split(".")))
        output.append(t.render(Context(context)))
    return "".join(output)
Example #33
def blog_post_list_index(request,
                         tag=None,
                         year=None,
                         month=None,
                         username=None,
                         category=None,
                         template="index.html",
                         extra_context=None):
    """
    Display a list of blog posts that are filtered by tag, year, month,
    author or category. Custom templates are checked for using the name
    ``blog/blog_post_list_XXX.html`` where ``XXX`` is either the
    category slug or author's username if given.
    """
    templates = []
    blog_posts = BlogPost.objects.published(for_user=request.user)
    if tag is not None:
        tag = get_object_or_404(Keyword, slug=tag)
        blog_posts = blog_posts.filter(keywords__keyword=tag)
    if year is not None:
        blog_posts = blog_posts.filter(publish_date__year=year)
        if month is not None:
            blog_posts = blog_posts.filter(publish_date__month=month)
            try:
                month = _(month_name[int(month)])
            except IndexError:
                raise Http404()
    if category is not None:
        category = get_object_or_404(BlogCategory, slug=category)
        blog_posts = blog_posts.filter(categories=category)
        templates.append(u"index_%s.html" % str(category.slug))
    author = None
    if username is not None:
        author = get_object_or_404(User, username=username)
        blog_posts = blog_posts.filter(user=author)
        templates.append(u"index_%s.html" % username)

    prefetch = ("categories", "keywords__keyword")
    blog_posts = blog_posts.select_related("user").prefetch_related(*prefetch)
    blog_posts = paginate(blog_posts, request.GET.get("page", 1),
                          settings.BLOG_POST_PER_PAGE,
                          settings.MAX_PAGING_LINKS)
    context = {
        "blog_posts": blog_posts,
        "year": year,
        "month": month,
        "tag": tag,
        "category": category,
        "author": author
    }
    context.update(extra_context or {})
    templates.append(template)
    return TemplateResponse(request, templates, context)
Example #34
    def markup(self, text, escape=None, funcs={}, classes={}, methods={}):
        """Mark up some plain text, given a context of symbols to look for.
        Each context dictionary maps object names to anchor names."""
        escape = escape or self.escape
        results = []
        here = 0

        # XXX Note that this regular expression does not allow for the
        # hyperlinking of arbitrary strings being used as method
        # names. Only methods with names consisting of word characters
        # and '.'s are hyperlinked.
        pattern = re.compile(r'\b((http|ftp)://\S+[\w/]|'
                                r'RFC[- ]?(\d+)|'
                                r'PEP[- ]?(\d+)|'
                                r'(self\.)?((?:\w|\.)+))\b')
        while 1:
            match = pattern.search(text, here)
            if not match: break
            start, end = match.span()
            results.append(escape(text[here:start]))

            all, scheme, rfc, pep, selfdot, name = match.groups()
            if scheme:
                url = escape(all).replace('"', '&quot;')
                results.append('<a href="%s">%s</a>' % (url, url))
            elif rfc:
                url = 'http://www.rfc-editor.org/rfc/rfc%d.txt' % int(rfc)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif pep:
                url = 'http://www.python.org/dev/peps/pep-%04d/' % int(pep)
                results.append('<a href="%s">%s</a>' % (url, escape(all)))
            elif text[end:end+1] == '(':
                results.append(self.namelink(name, methods, funcs, classes))
            elif selfdot:
                results.append('self.<strong>%s</strong>' % name)
            else:
                results.append(self.namelink(name, classes))
            here = end
        results.append(escape(text[here:]))
        return ''.join(results)
Example #35
    def handle_import(self, options):

        tumblr_user = options.get("tumblr_user")
        if tumblr_user is None:
            raise CommandError("Usage is import_tumblr %s" % self.args)
        verbosity = int(options.get("verbosity", 1))
        json_url = "http://%s.tumblr.com/api/read/json" % tumblr_user
        json_start = "var tumblr_api_read ="
        date_format = "%a, %d %b %Y %H:%M:%S"
        start_index = 0

        while True:
            retries = MAX_RETRIES_PER_CALL
            try:
                call_url = "%s?start=%s" % (json_url, start_index)
                if verbosity >= 2:
                    print("Calling %s" % call_url)
                response = urlopen(call_url)
                if response.code == 404:
                    raise CommandError("Invalid Tumblr user.")
                elif response.code == 503:
                    # The Tumblr API is frequently unavailable so make a
                    # few tries, pausing between each.
                    retries -= 1
                    if not retries:
                        error = "Tumblr API unavailable, try again shortly."
                        raise CommandError(error)
                    sleep(3)
                    continue
                elif response.code != 200:
                    raise IOError("HTTP status %s" % response.code)
            except IOError as e:
                error = "Error communicating with Tumblr API (%s)" % e
                raise CommandError(error)

            data = response.read()
            json = loads(data.split(json_start, 1)[1].strip().rstrip(";"))
            posts = json["posts"]
            start_index += MAX_POSTS_PER_CALL

            for post in posts:
                handler = getattr(self, "handle_%s_post" % post["type"], None)
                if handler is not None:
                    title, content = handler(post)
                    pub_date = datetime.strptime(post["date"], date_format)
                    self.add_post(title=title,
                                  content=content,
                                  pub_date=pub_date,
                                  tags=post.get("tags"),
                                  old_url=post["url-with-slug"])
            if len(posts) < MAX_POSTS_PER_CALL:
                break
Example #36
    def update(self):
        self.display = self.content
        curTime = time() // 1
        offset = max(int((curTime - self.START) % len(self.content)), 0)
        # while offset > 0:
        #     if self.display[0] > chr(127):
        #         offset -= 1
        #         self.display = self.display[3:] + self.display[:3]
        #     else:
        #         offset -= 1
        #         self.display = self.display[1:] + self.display[:1]

        self.display = self.content[offset:] + self.content[:offset]
Example #37
 def test_enum(self):
     for i, name in enumerate(
         ['LANDED', 'LANDING', 'TAKING_OFF', 'HOVERING', 'FLYING']):
         if not _DeprecatedIntEnums:
             # no implicit int conversion (for the greater good)
             with self.assertRaises(TypeError):
                 self.assertEqual(int(FlyingState(i)), i)
             with self.assertRaises(TypeError):
                 self.assertEqual(int(FlyingState[name]), i)
         else:
             self.assertEqual(int(FlyingState(i)), i)
             self.assertEqual(int(FlyingState[name]), i)
         self.assertEqual(FlyingState(i).value, i)
         self.assertEqual(FlyingState(i)._value_, i)
         self.assertEqual(FlyingState[name]._value_, i)
         self.assertEqual(FlyingState[name].value, i)
         self.assertEqual(str(FlyingState[name]), 'FlyingState.' + name)
         self.assertEqual(str(FlyingState(i)), 'FlyingState.' + name)
         self.assertEqual(FlyingState[name].name, name)
         self.assertEqual(FlyingState[name]._name_, name)
         self.assertEqual(FlyingState(i).name, name)
         self.assertEqual(FlyingState(i)._name_, name)
Example #38
 def _parse_po(self, strings):
     """
     Parses ``strings.po`` file into a dict of {'string': id} items.
     """
     ui_strings = {}
     string_id = None
     for string in strings:
         if string_id is None and 'msgctxt' in string:
             string_id = int(re.search(r'"#(\d+)"', string, re.U).group(1))
         elif string_id is not None and 'msgid' in string:
             ui_strings[re.search(r'"(.*?)"', string, re.U).group(1)] = string_id
             string_id = None
     return ui_strings
Example #39
def set_capture_graphics(shadow_level):
    """Set shadow quality to level where level is an int between 0 and 4
        Note: We can set texture level, ambient occlusion, etc... but it only affects
        the display window, not the captured cameras.
    """
    if not isinstance(shadow_level, (int, np.integer)):
        raise ValueError('Shadow level should be an integer')
    shadow_level = int(shadow_level)
    if shadow_level not in SHADOW_RANGE:
        raise ValueError('Shadow level should be between 0 and 4')
    settings = deepdrive_simulation.SimulationGraphicsSettings()
    settings.shadow_quality = shadow_level
    deepdrive_simulation.set_graphics_settings(settings)
Example #40
    def _to_pedigree_gource_log_format(self, person_events):
        """
        Return a list of pedigree specific custom gource formatted log entries
        based on the list of person events passed in.
        """

        records = []

        for person, gource_path, related_events in person_events:

            logger.debug("Creating log entries for {0}".format(person.name))

            # Reduce events to only those that contain dates
            related_events_with_dates = []
            for related_event in related_events:
                person_family_object, event, directEvent = related_event
                if event.date:
                    related_events_with_dates.append(related_event)
                else:
                    logger.debug("No date for event {0}".format(event.type))

            if related_events_with_dates:

                for obj, event, directEvent in related_events_with_dates:

                    if event.datetime.year < ref_dt.year:
                        # Year is less than the epoch meaning we can't use
                        # time.mktime to create a useful timestamp for us.
                        # Instead, subtract the necessary seconds from the
                        # epoch time to arrive at the event time.
                        ref_delta = ref_dt - event.datetime
                        delta_seconds = ref_delta.total_seconds()
                        timestamp = ref_timestamp - delta_seconds
                    else:
                        timestamp = time.mktime(event.datetime.timetuple())

                    # Gource requires timestamp as an int
                    timestamp = int(timestamp)

                    # For this particular application we only want to capture
                    # the birth (ADDED) event.

                    if event.type == 'Birth':
                        if directEvent:
                            gource_event = GOURCE_ADDED
                            record = (timestamp, person.surname.lower(),
                                      gource_event, gource_path)
                            records.append(record)

        records.sort()
        return records
Example #41
def make_activity(entry, media, parent):
    status = {'0':1,'1':2}
    if Activity.objects.filter(title=entry['title']).count() == 0:
        initial = {
                'title' :entry['title'],
                'parent':parent}
        pub_date = datetime.fromtimestamp(int(entry['created']))
        content = replace_media_tag(entry['body']['und'][0]['value'],media)
        initial['full_text'] = content
        initial['status'] = status[entry['status']]

        try:
            email = "*****@*****.**" % entry['name']
            mezzanine_user = User.objects.get(email=email)
        except User.DoesNotExist:
            mezzanine_user = User.objects.get(pk=1)
        initial['user'] = mezzanine_user
        if entry['path']:
            initial['slug'] = entry['path']['alias']
        if entry['field_goals']:
            initial['goals'] = entry['field_goals']['und'][0]['value']
        if entry['field_planning']:
            initial['planning'] = entry['field_planning']['und'][0]['value']
        # if entry['field_archive']:
        #      initial.archive = {u'und': [{u'value': u'no-archive'}]]
        if entry['field_objectives']:
            initial['summary'] = entry['field_objectives']['und'][0]['value']
        if entry['field_observing_time']:
            initial['observing_time'] = int(entry['field_observing_time']['und'][0]['value'])
        initial['publish_date'] = pub_date
        activity, created = Activity.objects.get_or_create(**initial)
        if entry.get('field_discipline',None):
            set_keywords(activity, entry['field_discipline']['und'])
        if created:
            print("Imported activity: %s" % activity)
        return activity
    else:
        return Activity.objects.filter(title=entry['title'])[0], None
Example #42
def splitaddress(address):
    try:
        address = to_str(idna.encode(address))
    except (AttributeError, idna.IDNAError):
        pass
    sep = ']:' if address.split(':', 2)[2:] else ':'
    parts = address.rsplit(sep, 1)
    try:
        addr, port = parts
        port = int(port)
    except ValueError:
        addr = parts[0]
        port = None
    return addr, port
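Hypothetical inputs for the helper above (it relies on the module's `idna` and `to_str` imports); the port is only split off when one is present.

print(splitaddress("example.com"))       # ('example.com', None)
print(splitaddress("example.com:8080"))  # ('example.com', 8080)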
Example #43
File: fs.py Project: pyblub/pyload
def blksize(path):
    """Get optimal file system buffer size (in bytes) for I/O calls."""
    if os.name != 'nt':
        size = os.statvfs(path).f_bsize
    else:
        import ctypes
        drive = '{0}\\'.format(os.path.splitdrive(os.path.abspath(path))[0])
        cluster_sectors = ctypes.c_longlong(0)
        sector_size = ctypes.c_longlong(0)
        ctypes.windll.kernel32.GetDiskFreeSpaceW(
            ctypes.c_wchar_p(drive), ctypes.pointer(cluster_sectors),
            ctypes.pointer(sector_size), None, None)
        size = int(cluster_sectors.value * sector_size.value)
    return size
Example #44
    def run(self):
        """
        Run plugin

        :raises SimplePluginError: if unknown action string is provided.
        """
        self._handle = int(sys.argv[1])
        self._params = self.get_params(sys.argv[2][1:])
        self.log_debug(str(self))
        result = self._resolve_function()
        if result is not None:
            raise SimplePluginError(
                'A decorated function must not return any value! '
                'It returned {0} instead.'.format(result))
Example #45
    def From(cls, value, timeunit):
        """Returns a new Duration given a timeunit and value.

    Args:
      value: A number specifying the value of the duration.
      timeunit: A unit of time ranging from rdfvalue.MICROSECONDS to
        rdfvalue.WEEKS.
    Examples:
      >>> Duration.From(50, MICROSECONDS)
      <Duration 50 us>
      >>> Duration.From(120, SECONDS)
      <Duration 2 m>

    Returns:
      A new Duration, truncated to millisecond precision.
    """
        return cls(int(timeunit * value))
Example #46
 def __init__(self, initializer=None):
     if isinstance(initializer, ByteSize):
         super(ByteSize, self).__init__(initializer._value)  # pylint: disable=protected-access
     elif isinstance(initializer, string_types):
         super(ByteSize, self).__init__(self._ParseText(initializer))
     elif isinstance(initializer, (int, float)):
         super(ByteSize, self).__init__(initializer)
     elif isinstance(initializer, RDFInteger):
         super(ByteSize, self).__init__(int(initializer))
     elif initializer is None:
         super(ByteSize, self).__init__(0)
     else:
         raise InitializeError("Unknown initializer for ByteSize: %s." %
                               type(initializer))
Example #47
def size(value, in_unit, out_unit):
    """Convert file size."""
    in_unit = in_unit.strip()[0].upper()
    out_unit = out_unit.strip()[0].upper()

    if in_unit == out_unit:
        return value

    in_unit += 'yte' if in_unit == 'B' else 'iB'
    out_unit += 'yte' if out_unit == 'B' else 'iB'

    try:
        # Create a bitmath instance representing the input value with its
        # corresponding unit
        in_size = getattr(bitmath, in_unit)(value)
        # Call the instance method to convert it to the output unit
        out_size = getattr(in_size, 'to_' + out_unit)()
        return out_size.value

    except AttributeError:
        sizeunits = ('B', 'K', 'M', 'G', 'T', 'P', 'E')
        sizemap = dict((u, i * 10) for i, u in enumerate(sizeunits))

        in_magnitude = sizemap[in_unit]
        out_magnitude = sizemap[out_unit]

        magnitude = in_magnitude - out_magnitude
        i, d = divmod(value, 1)

        decimal = int(d * (1024**(abs(magnitude) // 10)))
        if magnitude >= 0:
            integer = int(i) << magnitude
        else:
            integer = int(i) >> magnitude * -1
            decimal = -decimal

        return integer + decimal
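A small illustration of the conversion (assuming the `bitmath` dependency is importable; for binary units the bit-shift fallback yields the same figures):

print(size(2048, 'K', 'M'))   # 2048 KiB -> 2 MiB
print(size(3, 'GiB', 'MiB'))  # 3 GiB -> 3072 MiB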
Example #48
 def _read_next_chunk_size(self):
     # Read the next chunk size from the file
     line = self.fp.readline(_MAXLINE + 1)
     if len(line) > _MAXLINE:
         raise LineTooLong("chunk size")
     i = line.find(b";")
     if i >= 0:
         line = line[:i] # strip chunk-extensions
     try:
         return int(line, 16)
     except ValueError:
         # close the connection as protocol synchronisation is
         # probably lost
         self._close_conn()
         raise
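A standalone illustration of the parsing step above: in HTTP/1.1 chunked transfer coding the chunk size is the hexadecimal number before any ";extension".

line = b"1a3;name=value\r\n"
i = line.find(b";")
if i >= 0:
    line = line[:i]       # strip chunk-extensions
print(int(line, 16))      # 419 bytes in this chunk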
Example #49
 def __init__(self, initializer=None, age=None):
     super(ByteSize, self).__init__(None, age)
     if isinstance(initializer, ByteSize):
         self._value = initializer._value  # pylint: disable=protected-access
     elif isinstance(initializer, string_types):
         self.ParseFromHumanReadable(initializer)
     elif isinstance(initializer, (int, float)):
         self._value = initializer
     elif isinstance(initializer, RDFInteger):
         self._value = int(initializer)
     elif initializer is None:
         self._value = 0
     else:
         raise InitializeError("Unknown initializer for ByteSize: %s." %
                               type(initializer))
Example #50
    def _resolve_function(self):
        """
        Resolve route from plugin callback path and call the respective
        route function

        :return: route function's return value
        """
        path = urlparse(sys.argv[0]).path
        self.log_debug('Routes: {0}'.format(self._routes))
        for route in itervalues(self._routes):
            if route.pattern == path:
                kwargs = {}
                self.log_debug(
                    'Calling {0} with kwargs {1}'.format(route, kwargs))
                with log_exception(self.log_error):
                    return route.func(**kwargs)

        for route in itervalues(self._routes):
            pattern = route.pattern
            if not pattern.count('/') == path.count('/'):
                continue
            while True:
                pattern, count = re.subn(r'/(<\w+?>)', r'/(?P\1.+?)', pattern)
                if not count:
                    break
            match = re.search(r'^' + pattern + r'$', path)
            if match is not None:
                kwargs = match.groupdict()
                # list allows to manipulate the dict during iteration
                for key, value in list(iteritems(kwargs)):
                    if key.startswith('int__') or key.startswith('float__'):
                        del kwargs[key]
                        if key.startswith('int__'):
                            key = key[5:]
                            value = int(value)
                        else:
                            key = key[7:]
                            value = float(value)
                        kwargs[key] = value
                    else:
                        kwargs[key] = py2_decode(unquote_plus(value))
                self.log_debug(
                    'Calling {0} with kwargs {1}'.format(route, kwargs))
                with log_exception(self.log_error):
                    return route.func(**kwargs)
        raise SimplePluginError(
            'No route matches the path "{0}"!'.format(path)
        )
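
The pattern rewriting above can be traced in isolation. The route and path below are made up; real patterns come from the plugin's registered routes. Placeholders such as <int__track_id> become named groups, and the int__/float__ prefixes drive type conversion of the captured values.

import re

pattern, path = '/play/<int__track_id>', '/play/42'   # hypothetical route and path

while True:
    pattern, count = re.subn(r'/(<\w+?>)', r'/(?P\1.+?)', pattern)
    if not count:
        break
# pattern is now '/play/(?P<int__track_id>.+?)'

kwargs = re.search(r'^' + pattern + r'$', path).groupdict()
print(kwargs)                                          # {'int__track_id': '42'}
print({k[5:]: int(v) for k, v in kwargs.items()})      # {'track_id': 42}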
Example #51
0
def karma(sender, **kwargs):
    """
    Each time a rating is saved, check its value and modify the
    profile karma for the related object's user accordingly.
    Since ratings are either +1/-1, if a rating is being edited,
    we can assume that the existing rating is in the other direction,
    so we multiply the karma modifier by 2.
    """
    rating = kwargs["instance"]
    value = int(rating.value)
    if not kwargs["created"]:
        value *= 2
    content_object = rating.content_object
    if rating.user != content_object.user:
        queryset = get_profile_model().objects.filter(user=content_object.user)
        queryset.update(karma=models.F("karma") + value)
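
A quick check of the doubling rule with illustrative numbers: flipping an existing +1 rating to -1 has to move karma by -2, exactly as if the old vote were removed and the new one applied.

karma = 10             # current karma, which already includes an existing +1
new_value = -1         # the rating is edited from +1 to -1
karma += new_value * 2
print(karma)           # 8 == 10 - 1 (undo the old +1) - 1 (apply the new -1)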
Example #52
0
    def set(self,
            domain,
            name,
            value,
            path='/',
            expires=None,
            secure=False,
            tailmatch=False):
        self.__dict__[name] = dict()
        self.__dict__[name]['id'] = to_str(value)
        self.__dict__[name]['domain'] = to_str(domain)
        self.__dict__[name]['tailmatch'] = 'TRUE' if tailmatch else 'FALSE'
        self.__dict__[name]['path'] = to_str(path)
        self.__dict__[name]['secure'] = 'TRUE' if secure else 'FALSE'
        self.__dict__[name]['expires'] = int(expires
                                             or time.time() + self.EXPIRE_TIME)
Example #53
0
    def forwards(self, orm):

        # Postgres throws error if trying to cast from varchar to integer field,
        # so we convert in Python. Should be OK since Field table is unlikely to
        # have many records.
        if not db.dry_run:
            data = list(orm['forms.Field'].objects.values_list('id', 'field_type'))

        db.delete_column('forms_field', 'field_type')
        db.add_column('forms_field', 'field_type', self.gf('django.db.models.fields.IntegerField')(null=True))

        if not db.dry_run:
            for pk, field_type in data:
                orm['forms.Field'].objects.filter(id=pk).update(field_type=int(field_type))

        db.alter_column('forms_field', 'field_type', self.gf('django.db.models.fields.IntegerField')())
Example #54
0
    def update_size(self):
        # get terminal size
        size = terminalsize.get_terminal_size()
        x = max(size[0], 10)
        y = max(size[1], 25)
        if (x, y) == (self.x, self.y):  # no need to resize
            return
        self.x, self.y = x, y

        # update indentations
        curses.resizeterm(self.y, self.x)
        self.startcol = int(float(self.x) / 5)
        self.indented_startcol = max(self.startcol - 3, 0)
        self.update_space()
        self.screen.clear()
        self.screen.refresh()
Example #55
0
    def write_header(self, buf):
        self.header += buf
        # TODO: forward headers? This is possibly unneeded; when we just parse
        # valid 200 headers as the first chunk, we will parse the headers
        # there anyway.
        if not self.range and self.header.endswith(os.linesep * 2):
            self.parse_header()
        # ftp file size parsing
        elif (not self.range and buf.startswith('150')
              and 'data connection' in buf):
            size = re.search(r"(\d+) bytes", buf)
            if size is not None:
                self.p._size = int(size.group(1))
                self.p.chunk_support = True

        self.header_parsed = True
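
A made-up FTP control reply of the kind the elif branch above sniffs for the file size (the filename and byte count are illustrative):

import re

buf = "150 Opening BINARY mode data connection for file.bin (104857600 bytes)"
if buf.startswith('150') and 'data connection' in buf:
    size = re.search(r"(\d+) bytes", buf)
    if size is not None:
        print(int(size.group(1)))   # 104857600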
Example #56
0
def ValidateCSRFTokenOrRaise(request):
    """Decorator for WSGI handler that checks CSRF cookie against the request."""

    # The CSRF check doesn't make sense for GET/HEAD methods, because they can
    # be (and are) used when downloading files through <a href> links, and
    # there's no way to set the X-CSRFToken header in that case.
    if request.method in ("GET", "HEAD"):
        return

    # In an ideal world only JavaScript can be used to add a custom header, and
    # only within its origin. By default, browsers don't allow JavaScript to
    # make cross-origin requests.
    #
    # Unfortunately, in the real world, due to bugs in browser plugins, it can't
    # be guaranteed that a page won't send an HTTP request with a custom header
    # set. That's why we also check the contents of the header via an HMAC check
    # with a server-stored secret.
    #
    # See for more details:
    # https://www.owasp.org/index.php/Cross-Site_Request_Forgery_(CSRF)_Prevention_Cheat_Sheet
    # (Protecting REST Services: Use of Custom Request Headers).
    csrf_token = request.headers.get("X-CSRFToken", "").encode("ascii")
    if not csrf_token:
        logging.info("Did not find headers CSRF token for: %s", request.path)
        raise werkzeug_exceptions.Forbidden("CSRF token is missing")

    try:
        decoded = base64.urlsafe_b64decode(csrf_token + b"==")
        digest, token_time = decoded.rsplit(CSRF_DELIMITER, 1)
        token_time = int(token_time)
    except (TypeError, ValueError):
        logging.info("Malformed CSRF token for: %s", request.path)
        raise werkzeug_exceptions.Forbidden("Malformed CSRF token")

    if len(digest) != hashlib.sha256().digest_size:
        logging.info("Invalid digest size for: %s", request.path)
        raise werkzeug_exceptions.Forbidden("Malformed CSRF token digest")

    expected = GenerateCSRFToken(request.user, token_time)
    if not constant_time.bytes_eq(csrf_token, expected):
        logging.info("Non-matching CSRF token for: %s", request.path)
        raise werkzeug_exceptions.Forbidden("Non-matching CSRF token")

    current_time = rdfvalue.RDFDatetime.Now().AsMicrosecondsSinceEpoch()
    if current_time - token_time > CSRF_TOKEN_DURATION.microseconds:
        logging.info("Expired CSRF token for: %s", request.path)
        raise werkzeug_exceptions.Forbidden("Expired CSRF token")
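
The validator implies a token layout of base64url(digest + delimiter + timestamp) with a 32-byte HMAC-SHA256 digest. The project's real GenerateCSRFToken lives elsewhere; the sketch below is only an assumption about what a compatible generator could look like, and the delimiter, secret and message format are all hypothetical.

import base64
import hashlib
import hmac

CSRF_DELIMITER = b":"   # assumed delimiter; the real value is defined elsewhere


def generate_csrf_token_sketch(username, token_time, secret=b"server-secret"):
    # HMAC the user name and timestamp with a server-side secret, then pack the
    # digest and the timestamp into an unpadded base64url string.
    message = b"%s:%d" % (username.encode("utf-8"), token_time)
    digest = hmac.new(secret, message, hashlib.sha256).digest()
    token = digest + CSRF_DELIMITER + str(token_time).encode("ascii")
    return base64.urlsafe_b64encode(token).rstrip(b"=")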
Example #57
0
def _fake_linecache(text, filename):
    """ Inject text into the linecache for traceback purposes.

    Parameters
    ----------
    text : str
        The text of the file.

    filename : str
        The name of the file.

    """
    size = len(text)
    mtime = int(1343290295)
    lines = text.splitlines()
    lines = [l + '\n' for l in lines]
    linecache.cache[filename] = size, mtime, lines, filename
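
A small usage sketch (the filename is made up): once injected, linecache lookups and traceback rendering see the fake file as if it existed on disk.

import linecache

source = "x = 1\ny = x + 1\n"
_fake_linecache(source, "<generated>")
print(linecache.getline("<generated>", 2))   # -> 'y = x + 1\n'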
Example #58
0
    def __init__(self):
        self.screen = curses.initscr()
        # self.screen.timeout(100)  # the screen refresh every 100ms
        # # character break buffer
        # curses.cbreak()
        # self.screen.keypad(1)

        curses.start_color()
        if Config().get("curses_transparency"):
            curses.use_default_colors()
            curses.init_pair(1, curses.COLOR_GREEN, -1)
            curses.init_pair(2, curses.COLOR_CYAN, -1)
            curses.init_pair(3, curses.COLOR_RED, -1)
            curses.init_pair(4, curses.COLOR_YELLOW, -1)
        else:
            colors = Config().get("colors")
            if ("TERM" in os.environ and os.environ["TERM"] == "xterm-256color"
                    and colors):
                curses.use_default_colors()
                for i in range(1, 6):
                    color = colors["pair" + str(i)]
                    curses.init_pair(i, color[0], color[1])
                self.screen.bkgd(32, curses.color_pair(5))
            else:
                curses.init_pair(1, curses.COLOR_GREEN, curses.COLOR_BLACK)
                curses.init_pair(2, curses.COLOR_CYAN, curses.COLOR_BLACK)
                curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)
                curses.init_pair(4, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        # term resize handling
        size = get_terminal_size()
        self.x = size[0]
        self.y = size[1]
        self.playerX = 1  # terminalsize.get_terminal_size()[1] - 10
        self.playerY = 0
        self.startcol = int(float(self.x) / 5)
        self.indented_startcol = max(self.startcol - 3, 0)
        self.update_space()
        self.lyric = ""
        self.now_lyric = ""
        self.post_lyric = ""
        self.now_lyric_index = 0
        self.now_tlyric_index = 0
        self.tlyric = ""
        self.storage = Storage()
        self.config = Config()
        self.newversion = False
Example #59
0
def smooth_func(f, t, window_len=None, window='flat'):
    """Smooth a function f at time samples t"""
    if window_len is None:
        f_ = f(t)
    else:
        dt = t[1] - t[0]
        if np.sum(np.abs(np.diff(t) - dt)) > 1e-5:
            msg = 'samples have to be evenly spaced'
            raise ValueError(msg)
        samples = int(round(window_len / dt))
        N1 = (samples - 1) // 2
        N2 = samples // 2
        t_ = np.hstack((t[0] - N1 * dt + np.arange(N1) * dt, t,
                        t[-1] + dt + np.arange(N2) * dt))
        f_ = f(t_)
        f_ = smooth(f_, samples, method=None, window=window)
    return f_
Example #60
0
    def __init__(self, initializer=None, age=None):
        super(Duration, self).__init__(None, age)
        if isinstance(initializer, Duration):
            self._value = initializer._value  # pylint: disable=protected-access
        elif isinstance(initializer, Text):
            self.ParseFromHumanReadable(initializer)
        elif isinstance(initializer, bytes):
            self.ParseFromString(initializer)
        elif isinstance(initializer, int):
            self._value = initializer
        elif isinstance(initializer, RDFInteger):
            self._value = int(initializer)
        elif initializer is None:
            self._value = 0
        else:
            message = "Unsupported initializer `{value}` of type `{type}`"
            raise TypeError(
                message.format(value=initializer, type=type(initializer)))