Example #1
    def match(self):
        """
        Matches the paste against a series of regular expressions to determine if the paste is 'interesting'

        Sets the following attributes:
                self.emails
                self.hashes
                self.num_emails
                self.num_hashes
                self.db_keywords
                self.type

        """
        # Collect unique email addresses and any 32-character hashes
        self.emails = list(set(regexes['email'].findall(self.text)))
        self.hashes = regexes['hash32'].findall(self.text)
        self.num_emails = len(self.emails)
        self.num_hashes = len(self.hashes)

        if self.num_emails > 0:
            self.sites = list(set([re_search('@(.*)$', email).group(1).lower() for email in self.emails]))

        for regex in regexes['db_keywords']:
            if regex.search(self.text):
                # logging.debug('\t[+] ' + regex.search(self.text).group(1))
                self.db_keywords += round(1 / float(
                    len(regexes['db_keywords'])), 2)

        for regex in regexes['blacklist']:
            if regex.search(self.text):
                # logging.debug('\t[-] ' + regex.search(self.text).group(1))
                self.db_keywords -= round(1.25 * (
                    1 / float(len(regexes['db_keywords']))), 2)

        if (self.num_emails >= 20) \
                or (self.num_hashes >= 30) \
                or (self.db_keywords >= .55):
            self.type = 'db_dump'

        if regexes['cisco_hash'].search(self.text) or regexes['cisco_pass'].search(self.text):
            self.type = 'cisco'

        if regexes['honeypot'].search(self.text):
            self.type = 'honeypot'

        if regexes['google_api'].search(self.text):
            self.type = 'google_api'

        if regexes['pgp_private'].search(self.text):
            self.type = 'pgp_private'

        if regexes['ssh_private'].search(self.text):
            self.type = 'ssh_private'

        for regex in regexes['banlist']:
            if regex.search(self.text):
                self.type = None
                break

        return self.type
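match() depends on a module-level regexes mapping of precompiled patterns that this excerpt does not include. Below is a minimal sketch of the shape that mapping must have; every pattern is a placeholder assumption, not the project's real pattern:

from re import compile as re_compile

# Hypothetical stand-in for the `regexes` mapping that match() expects;
# the real patterns live elsewhere in the project.
regexes = {
    'email': re_compile(r'[\w.+-]+@[\w-]+\.[\w.-]+'),
    'hash32': re_compile(r'\b[a-fA-F0-9]{32}\b'),  # MD5-like hex digests
    'db_keywords': [re_compile(r'(?i)(insert into)'), re_compile(r'(?i)(varchar)')],
    'blacklist': [re_compile(r'(?i)(lorem ipsum)')],
    'banlist': [],
    'cisco_hash': re_compile(r'enable secret 5 \S+'),
    'cisco_pass': re_compile(r'enable password \S+'),
    'honeypot': re_compile(r'(?i)honeypot'),
    'google_api': re_compile(r'AIza[0-9A-Za-z_\-]{35}'),
    'pgp_private': re_compile(r'BEGIN PGP PRIVATE KEY BLOCK'),
    'ssh_private': re_compile(r'BEGIN RSA PRIVATE KEY'),
}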
Example #2
def _reffactory(seqdir, seqfmt, name=None, genes=None):
    if name is None:
        name = re_compile(r'[^0-9A-Z]', re_I).sub('_', basename(seqdir))
    datadict = {}
    if genes is not None:
        for gene in genes:
            datadict[gene] = _lazyseq(seqdir, seqfmt % gene)
    else:
        genes = []
        for seqpath in iglob(join(seqdir, seqfmt % '*')):
            m = re_search(seqfmt % '(.+)', seqpath)
            if m:
                gene = m.group(1)
                genes.append(gene)
                datadict[gene] = _lazyseq(seqdir, basename(seqpath))
    # if the seqdir has the refdir in it, then add it to the list
    # of default-installed reference sequence directories
    if _refdir in seqdir:
        globber = '*' + splitext(seqfmt)[1]
        _installrefdirs.append(
            join(
                'data',
                'references',
                basename(seqdir),
                globber
            )
        )
    return namedtuple(name, genes)(**datadict)
Example #3
def git_versions_from_keywords(keywords, tag_prefix_, verbose=False):
   if not keywords:
      return {}  # keyword-finding function failed to find keywords
   refnames = keywords['refnames'].strip()
   if refnames.startswith('$Format'):
      if verbose:
         print('keywords are unexpanded, not using')
      return {}  # unexpanded, so not in an unpacked git-archive tarball
   refs = set([r.strip() for r in refnames.strip('()').split(',')])
   # starting in git-1.8.3, tags are listed as 'tag: foo-1.0' instead of just 'foo-1.0'. If we see a 'tag: ' prefix, prefer
   # those.
   # noinspection PyPep8Naming
   TAG = 'tag: '
   tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
   if not tags:
      # Either we're using git < 1.8.3, or there really are no tags. We use a heuristic: assume all version tags have a
      # digit. The old git %d expansion behaves like git log --decorate=short and strips out the refs/heads/ and refs/tags/
      # prefixes that would let us distinguish between branches and tags. By ignoring refnames without digits, we filter out
      # many common branch names like 'release' and 'stabilization', as well as 'HEAD' and 'master'.
      tags = set([r for r in refs if re_search(r'\d', r)])
      if verbose:
         print('discarding <{}>, no digits'.format(','.join(refs - tags)))
   if verbose:
      print('likely tags: {}'.format(','.join(sorted(tags))))
   for ref in sorted(tags):
      # sorting will prefer e.g. '2.0' over '2.0rc1'
      if ref.startswith(tag_prefix_):
         r = ref[len(tag_prefix_):]
         if verbose:
            print('picking {}'.format(r))
         return {'version': r, 'full': keywords['full'].strip()}
   # no suitable tags, so we use the full revision id
   if verbose:
      print('no suitable tags, using full revision id')
   return {'version': keywords['full'].strip(), 'full': keywords['full'].strip()}
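A quick usage sketch with a hand-built keywords dict in the shape the function expects; both values below are made up:

keywords = {
    'refnames': ' (HEAD -> master, tag: v1.2.3)',
    'full': '0123456789abcdef0123456789abcdef01234567',
}
# 'tag: v1.2.3' survives the 'tag: ' filter, matches the 'v' prefix,
# and '1.2.3' is returned as the version.
print(git_versions_from_keywords(keywords, tag_prefix_='v'))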
Example #4
def get_area_field(layer, fields, field_types):
    print fields
    print field_types
    candidates = []
    for i, field in enumerate(fields):
        field_lower = field.lower()
        field_type = field_types[i]
        values_list = [value.lower() if isinstance(value, str) or isinstance(value, unicode) else value for value in layer.get_fields(field)]
        if field_type == "OFTReal" and any(word in field_lower for word in ["area","sqkm","sqkm","km2","km_2"]):
            values_median = median(values_list)

            # we certainly don't want this field if the median is not positive
            if values_median > 0:
                candidates.append((field, values_median))

    # sort candidates by median, descending
    candidates = sorted(candidates, key=lambda candidate: -1 * candidate[1])
    number_of_candidates = len(candidates)

    if number_of_candidates == 0:
        return None
    elif number_of_candidates == 1:
        return candidates[0][0]
    else:
        # several candidates: prefer one whose name explicitly mentions square kilometres
        sqkms = [c for c in candidates if re_search("(km2|sqkm|sq_km)", c[0].lower())]
        if len(sqkms) == 0:
            return candidates[0][0]
        else:
            return sqkms[0][0]
Example #5
def _smfactory(smdir=_smdir, smfmt='%s.txt'):
    matrices = {}
    for smpath in iglob(join(smdir, smfmt % '*')):
        m = re_search(smfmt % '(.+)', smpath)
        if m:
            name = basename(m.group(1))
            matrices[name] = _lazyscorematrix(name, smdir, basename(smpath))
    return matrices
Example #6
    def do_GET(self):
        headers = {}
        response_body = 'https://github.com/elespike/burp-cph/wiki/00.-Interactive-demos'

        if self.path == '/':
            headers['Content-Type'] = 'text/html'
            response_body = '<h2>Welcome!</h2>Please <a href="https://github.com/elespike/burp-cph/wiki/00.-Interactive-demos">visit the Wiki </a> for instructions.'

        if self.path.startswith('/number'):
            response_body = str(TinyHandler.the_number)

        if self.path.startswith('/indices'):
            response_body = '[0][ ]1st  [1][ ]2nd  [2][ ]3rd\n\n[3][ ]4th  [4][ ]5th  [5][ ]6th\n\n[6][ ]7th  [7][ ]8th  [8][ ]9th'

        # E.g., /1/12345
        s = re_search('^/[123]/?.*?(\d{1,5})$', self.path)
        if s is not None:
            number = TinyHandler.normalize(s.group(1))
            if number == TinyHandler.the_number:
                response_body = '{} was correct!'.format(number)
            else:
                response_body = 'Try again!'
            TinyHandler.the_number = randint(1, 99999)
            response_body += '\nNew number: {}'.format(TinyHandler.the_number)

        if self.path.startswith('/echo/'):
            response_body = self.path.replace('/echo/', '')
            response_body = unquote(response_body)

        if self.path.startswith('/check'):
            number = 0
            s = re_search('number=(\d{1,5})', self.headers.get('cookie', ''))
            if s is not None and s.groups():
                number = TinyHandler.normalize(s.group(1))
            if not number:
                # Search again in the path/querystring.
                s = re_search('\d{1,5}', self.path)
                if s is not None:
                    number = TinyHandler.normalize(s.group(0))
            if number == TinyHandler.the_number:
                response_body = '{} was correct!'.format(number)
            else:
                response_body = 'Try again!'

        self.respond(response_body, headers)
Example #7
def git_get_keywords(versionfile_abs):
   # the code embedded in _version.py can just fetch the value of these keywords. When used from setup.py, we don't want to
   # import _version.py, so we do it with a regexp instead. This function is not used from _version.py.
   keywords = {}
   try:
      f = open(versionfile_abs, 'r')
      for line in f.readlines():
         if line.strip().startswith('git_refnames ='):
            mo = re_search(r'=\s*"(.*)"', line)
            if mo:
               keywords['refnames'] = mo.group(1)
         if line.strip().startswith('git_full ='):
            mo = re_search(r'=\s*"(.*)"', line)
            if mo:
               keywords['full'] = mo.group(1)
      f.close()
   except EnvironmentError:
      pass
   return keywords
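For reference, the parser above looks for lines of the following shape in the version file; this is a made-up sample of what an expanded git-archive keyword block might look like:

# contents of a hypothetical _version.py after `git archive` keyword expansion:
git_refnames = " (tag: v1.2.3)"
git_full = "0123456789abcdef0123456789abcdef01234567"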
Example #8
File: build.py Project: MaikuMori/wuiup
def dispatch(argv):
    if len(argv) == 1:
        #And here comes the over-complicated test :D~~.
        if re_search("^\d+\.([1-9]|\d+[1-9])(.[1-9]|.\d+[1-9])?$", argv[0]):
            build(argv[0])
        else:
            #major_release.feature_release or
            #major_release.feature_release.bugfix_release
            print "The version number must be in format:"
            print "  (0-999+).(1-999+) or (0-999+).(1-999+).(1-999+)"
    else:
        print "You must provide version number."
        print "  For exmaple:\t python build.py 0.32"
Example #9
def add_tag_page(request, boardname, bid, pid, pidx):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  board = get_board(boardname)
  try:
    if not board.group_board.members.filter(id=request.user.id).exists():
      return render_to_response('noExist.html',{'user':request.user, 'target':'해당 소모임게시판에 접근할 권한이'})
  except ObjectDoesNotExist:
    pass
  try:
    bulletin = Bulletin.bulletIns.get(id=bid)
  except ObjectDoesNotExist:
    return render_to_response('noexist.html',{'user':request.user, 'target':'해당글이'})
  try:
    photo = Photo.objects.get(id=pid)
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당사진이'})
  page = request.GET.get('page', 1)

  if request.method == 'POST':
    key = request.POST["key"]
    if key:
      x = request.POST["x"]
      y = request.POST["y"]
      w = request.POST["w"]
      h = request.POST["h"]
      if x != '-' and y != '-' and w != '-' and h != '-':
        PhotoTag.objects.create(photo=photo,
            title=key,
            x=x,
            y=y,
            w=w,
            h=h,
            )
        # Add a Feed entry
        if bulletin.writer != request.user:
          Feed.objects.create(
            url="/board/%s/read/%d/"%(boardname, int(bid)),
            from_user=request.user,
            to_user=bulletin.writer,
            additional=key[:20],
            type=u'TN',
          )
        return HttpResponse("<script>opener.window.location.href='/board/%s/read/%d/?page=%d&gotoIndex=%d';/*opener.window.location.reload(true);*/ self.close();</script>" % (boardname, int(bid), int(page), int(pidx)-1))
  userSearchForm = UserSearchForm(request.GET)
  tpl = loader.get_template('board/addTag.html')    # Render using addTag.html as the template.
  ctx = RequestContext(request, {            # Parameters can be passed as a dictionary.
    'userSearchForm':userSearchForm,
    'photo':photo,
    })
  return HttpResponse(tpl.render(ctx))
Example #10
    def do_POST(self):
        headers = {}
        response_body = 'Try again!'

        content_length = int(self.headers.get('content-length', 0))
        body = self.rfile.read(content_length)

        if self.path.startswith('/cookie'):
            number = 0
            # Accept both JSON and url-encoded form data.
            try:
                number = TinyHandler.normalize(loads(body)['number'])
            except:
                s = re_search('number=(\d{1,5})', body)
                if s is not None and s.groups():
                    number = TinyHandler.normalize(s.group(1))
            if number == TinyHandler.the_number:
                headers['Set-Cookie'] = 'number={}'.format(TinyHandler.the_number)
                response_body = '"number" cookie set to {}!'.format(TinyHandler.the_number)

        if self.path.startswith('/number'):
            s = re_search('number=(\d{1,5})', self.headers.get('cookie', ''))
            number_cookie = 0
            if s is not None and s.groups():
                number_cookie = int(s.group(1))
            if number_cookie == TinyHandler.the_number:
                number = randint(1, 99999)
                # Accept both JSON and url-encoded form data.
                try:
                    number = TinyHandler.normalize(loads(body)['number'])
                except:
                    s = re_search('number=(\d{1,5})', body)
                    if s is not None and s.groups():
                        number = TinyHandler.normalize(s.group(1))
                TinyHandler.the_number = number
                response_body = 'Number set to {}!'.format(TinyHandler.the_number)

        self.respond(response_body, headers)
Example #11
File: HTTP.py Project: Z3po/inventorize
    def connect_HTTP(self, arguments): # {{{
        if arguments['expect'] == 'None':
            expect = ''
        else:
            expect = arguments['expect']
        try:
            urlhandle = urllib.urlopen(arguments['url'])
            for line in urlhandle.readlines():
                if re_search(expect, line.strip()):
                    return True
        except:
            return False

        return False
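A usage sketch, assuming checker is an instance of the (unshown) enclosing class; note that passing the literal string 'None' as expect degrades to matching any line:

# hypothetical instance and arguments; returns True once a response line matches
ok = checker.connect_HTTP({'url': 'http://example.com', 'expect': 'Example Domain'})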
Example #12
    def IPaddrFinished(self, result, retval, extra_args):
        (iface, callback) = extra_args
        data = {"up": False, "dhcp": False, "preup": False, "predown": False}
        globalIPpattern = re_compile("scope global")
        ipRegexp = "[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}"
        netRegexp = "[0-9]{1,2}"
        macRegexp = "[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}"
        ipLinePattern = re_compile("inet " + ipRegexp + "/")
        ipPattern = re_compile(ipRegexp)
        netmaskLinePattern = re_compile("/" + netRegexp)
        netmaskPattern = re_compile(netRegexp)
        bcastLinePattern = re_compile(" brd " + ipRegexp)
        upPattern = re_compile("UP")
        macPattern = re_compile(macRegexp)
        macLinePattern = re_compile("link/ether " + macRegexp)

        for line in result.splitlines():
            split = line.strip().split(" ", 2)
            if split[1][:-1] == iface:
                up = self.regExpMatch(upPattern, split[2])
                mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2]))
                if up is not None:
                    data["up"] = True
                    if iface is not "lo":
                        self.configuredInterfaces.append(iface)
                if mac is not None:
                    data["mac"] = mac
            if split[1] == iface:
                if re_search(globalIPpattern, split[2]):
                    ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
                    netmask = self.calc_netmask(
                        self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2]))
                    )
                    bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2]))
                    if ip is not None:
                        data["ip"] = self.convertIP(ip)
                    if netmask is not None:
                        data["netmask"] = self.convertIP(netmask)
                    if bcast is not None:
                        data["bcast"] = self.convertIP(bcast)

        if not data.has_key("ip"):
            data["dhcp"] = True
            data["ip"] = [0, 0, 0, 0]
            data["netmask"] = [0, 0, 0, 0]
            data["gateway"] = [0, 0, 0, 0]

        cmd = "route -n | grep  " + iface
        self.Console.ePopen(cmd, self.routeFinished, [iface, data, callback])
Example #13
File: Network.py Project: torac/enigma2
	def IPaddrFinished(self, result, retval, extra_args):
		(iface, callback ) = extra_args
		data = { 'up': False, 'dhcp': False, 'preup' : False, 'predown' : False }
		globalIPpattern = re_compile("scope global")
		ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
		netRegexp = '[0-9]{1,2}'
		macRegexp = '[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}\:[0-9a-fA-F]{2}'
		ipLinePattern = re_compile('inet ' + ipRegexp + '/')
		ipPattern = re_compile(ipRegexp)
		netmaskLinePattern = re_compile('/' + netRegexp)
		netmaskPattern = re_compile(netRegexp)
		bcastLinePattern = re_compile(' brd ' + ipRegexp)
		upPattern = re_compile('UP')
		macPattern = re_compile(macRegexp)
		macLinePattern = re_compile('link/ether ' + macRegexp)

		for line in result.splitlines():
			split = line.strip().split(' ',2)
			if (split[1][:-1] == iface):
				up = self.regExpMatch(upPattern, split[2])
				mac = self.regExpMatch(macPattern, self.regExpMatch(macLinePattern, split[2]))
				if up is not None:
					data['up'] = True
					if iface != 'lo':
						self.configuredInterfaces.append(iface)
				if mac is not None:
					data['mac'] = mac
			if (split[1] == iface):
				if re_search(globalIPpattern, split[2]):
					ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
					netmask = self.calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2])))
					bcast = self.regExpMatch(ipPattern, self.regExpMatch(bcastLinePattern, split[2]))
					if ip is not None:
						data['ip'] = self.convertIP(ip)
					if netmask is not None:
						data['netmask'] = self.convertIP(netmask)
					if bcast is not None:
						data['bcast'] = self.convertIP(bcast)

		if 'ip' not in data:
			data['dhcp'] = True
			data['ip'] = [0, 0, 0, 0]
			data['netmask'] = [0, 0, 0, 0]
			data['gateway'] = [0, 0, 0, 0]

		cmd = "route -n | grep  " + iface
		self.Console.ePopen(cmd,self.routeFinished, [iface, data, callback])
Example #14
def parseParams(opts):

    found = opts.conf_Path

    if not found:
        print "configuration directory is not exit!"
        sys.exit(0)

    recipe = found
    trmap = dict()
    for root, dirs, files in os.walk(recipe):
        for filespath in files:
            if re_match('.*ml$', filespath):
                filename = re_search(r'(.*)\..*ml$', filespath).group(1)
                trmap[filename] = expYaml(os.path.join(root, filespath))

    return trmap
Example #15
def checksum(sentence):
    """ Calculate the checksum for a sentence (e.g. NMEA string). """

    result = {'checksum':None}
    # Remove any newlines
    if re_search("\n$", sentence):
        sentence = sentence[:-1]

    nmeadata, cksum = re_split(r'\*', sentence)

    calc_cksum = 0
    for s in nmeadata:
        calc_cksum ^= ord(s)

    # Return the calculated checksum as uppercase hex (cksum from the sentence is kept for comparison)
    result['checksum'] = hex(calc_cksum)[2:].upper()
    return result
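A usage sketch with the widely cited NMEA GGA example sentence, passed without its leading '$' (the XOR runs over everything before the '*'):

sentence = "GPGGA,123519,4807.038,N,01131.000,E,1,08,0.9,545.4,M,46.9,M,,*47"
print(checksum(sentence))  # expected: {'checksum': '47'}, matching the transmitted *47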
Example #16
def delete_file_page(request, boardname, fid):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  page = int(request.GET.get('page', 1))
  try:
    file = RelatedFile.objects.get(id=fid)
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당파일이'})
  bulletin = file.bulletin
  bid = bulletin.id

  can_delete = not((datetime.now() - bulletin.created).days >= settings.CAN_MODIFY_DAYS)
  if can_delete and bulletin.isMyBulletin(request.user):
    file.delete()    # Delete the file here.
    return HttpResponseRedirect('/board/%s/modify/%d/?page=%d'% (boardname, int(bid), page))
  else:
    # This is the case where deletion is not allowed.
    return HttpResponseRedirect('/board/%s/modify/%d/?page=%d'% (boardname, int(bid), page))
Example #17
def parseParams(config_path):

    found = filter(lambda x: isdir(x),
                    (config_path, '/etc/secdd/conf'))

    if not found:
        print "configuration directory is not exit!"
        sys.exit(0)

    recipe = found[0]
    trmap = dict()
    for root, dirs, files in os.walk(recipe):
        for filespath in files:
            if re_match('.*ml$', filespath):
                filename = re_search(r'(.*)\..*ml$', filespath).group(1)
                trmap[filename] = expYaml(os.path.join(root, filespath))

    return trmap
Example #18
def write_fastd_config_limit(settings, instance, limit, uptime):
    '''
    Writes calculated limit to the config file of ``fastd``.

    :param settings: script settings
    :param instance: fastd instance name
    :param limit: calculated fastd peer limit to write
    :param uptime: current fastd daemon uptime in seconds
    :return: ``True`` if ``fastd`` should be restarted then.
    '''
    LIMIT_RX = r'peer limit ([\d]+);'

    #: locate the fastd config
    config_file = settings['fastd_config'] % (instance)
    if not path.exists(config_file):
        print('~ %s: %s not found' %(instance,config_file))
        return False

    #: load config to string
    with open (config_file, "r") as file:
        lines = file.readlines()
        config = ''.join(lines)

    #: find the current peer limit in the fastd config
    #: skip the rest if none present
    match = re_search(LIMIT_RX, config)
    if not match:
        print('~ no peer limit present in config for %s. skipping' % (instance))
        return False

    old_limit = int(match.group(1))

    #: replacing the current limit with the calculated limit
    new_config = re_sub(LIMIT_RX, 'peer limit %s;' % (limit), config)
    with open (config_file, "w") as file:
        file.write(new_config)

    #: return ``True`` if there was a huge bump in the limit, or
    #: fastd was running long enough..
    return any([
        abs(limit - old_limit) >= settings['additional'],
        uptime >= settings['restart_max']
    ])
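A hedged usage sketch. The settings keys mirror the ones the function reads (fastd_config, additional, restart_max); the path and numbers below are made up:

settings = {
    'fastd_config': '/etc/fastd/%s/fastd.conf',  # pattern with one %s for the instance name
    'additional': 10,       # limit delta considered a "huge bump"
    'restart_max': 86400,   # uptime (seconds) after which a restart is fine anyway
}
if write_fastd_config_limit(settings, 'mesh0', limit=42, uptime=3600):
    print('fastd should be restarted')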
Example #19
    def query_vk_audio(self, search):
        result_array = []
        search = unidecode(search)
        self.get(SEARCH_MUSIC_ROOT % search)
        results = self.find_elements_by_class_name('audio')
        for song_node in results:
            td = song_node.find_elements_by_class_name('play_btn_wrap')[0].find_element_by_xpath('..')
            input_ = td.find_elements_by_css_selector('*')[-1]
            link = input_.get_attribute('value')

            id_ = re_search('[\d_]+$', song_node.get_attribute('id')).group()
            title_section = song_node.find_element_by_xpath("//span[contains(@id, '{id}')]".format(id=id_))
            title = title_section.text
            if not title:
                break
            result_array.append(dict(title=title, author=search, url=link))
        logger.info('fetched %s objects' % len(result_array))
        logger.info('return fetch query')
        return result_array
Example #20
    def name_is_valid(cls, name):
        if not name:
            return False

        if len(name) > 100:
            return False

        if re_search(r'[^a-z0-9_]', name):
            return False

        if '__' in name:
            return False

        if name.startswith('_'):
            return False

        if name.endswith('_'):
            return False

        return True
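The rules combine to: lowercase alphanumerics and single underscores only, no leading or trailing underscore, at most 100 characters. A quick check, assuming the enclosing class is called NameValidator (the real class name isn't shown):

for candidate in ('valid_name', 'Invalid', 'double__underscore', '_leading', 'trailing_'):
    print(candidate, NameValidator.name_is_valid(candidate))
# only 'valid_name' prints True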
Example #21
def _create_slug(word):
    if len(re_findall(u'([а-я:;_,.\s\$!\?\"\'@#\+\(\)&\^№=\*%]+)', word)) == 0:
        return word
    ru = {
        u'а': 'a', u'б': 'b', u'в': 'v', u'г': 'g', u'д': 'd',
        u'е': 'e', u'ё': 'e', u'ж': 'zh', u'з': 'z', u'и': 'i',
        u'й': 'i', u'к': 'k', u'л': 'l', u'м': 'm', u'н': 'n',
        u'о': 'o', u'п': 'p', u'р': 'r', u'с': 's', u'т': 't',
        u'у': 'u', u'ф': 'f', u'х': 'h', u'ц': 'c', u'ч': 'ch',
        u'ш': 'sh', u'щ': 'sh', u'ъ': '', u'ы': 'i', u'ь': '',
        u'э': 'e', u'ю': 'yu', u'я': 'ia', '_': '-', '-': '-',
        ' ': '-'
    }
    ret = ''
    for letter in word.lower():
        if letter in ru:
            ret += ru[letter]
        elif re_search(r'[a-z0-9]', letter) is not None:
            ret += letter
    return ret
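A transliteration example (Python 2 unicode literal, to match the function's table):

print(_create_slug(u'Привет мир'))  # -> 'privet-mir'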
Example #22
def delete_page(request, boardname, bid):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  try:
    bulletin = Bulletin.bulletIns.get(id=bid)
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당글이'})
  page = int(request.GET.get('page', 1))
  # If the post is already deleted, just move on. *suspicious one*
  if bulletin.deleted:
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))

  can_delete = not((datetime.now() - bulletin.created).days >= settings.CAN_MODIFY_DAYS)
  if can_delete and bulletin.isMyBulletin(request.user):
    # bulletin.delete()    # This would delete the post right here.
    # Instead of actually deleting it, just set the deleted flag.
    bulletin.deleted = True
    # Delete the related position, gallery, and files.
    try:
      relatedPosition = RelatedPosition.objects.get(bulletin=bulletin)
    except ObjectDoesNotExist:
      relatedPosition = None
    if relatedPosition:
      relatedPosition.delete()
    if bulletin.gallery:        # delete if one already exists
      # This is a special case, so...
      photos_list = bulletin.gallery.photos.all()
      for photo in photos_list:
        photo.delete()
      bulletin.gallery.delete()    # all the photos inside must be deleted too..
      bulletin.gallery = None
    for file in RelatedFile.objects.filter(bulletin=bulletin): # delete all the files as well
      file.delete()

    bulletin.save()
    # Deleting a post deducts POINT_BULLETIN points.
    reducePoint(request, bulletin)
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
  else:
    # This is the case where deletion is not allowed.
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
Example #23
File: xlsx_proc.py Project: okxjd/okxjd
def _check_and_prepare(source=None, result=None, file_type="xlsx"):
    wrk_dir = os.getcwd()
    source_dir = os.path.abspath("".join([wrk_dir, "\\{0}\\".format(source)]))
    result_dir = os.path.abspath("".join([wrk_dir, "\\{0}\\".format(result)]))
    try:
        if not os.path.exists(result_dir):
            os.mkdir(result_dir)
    except OSError:
        logging.error('{0}: Unable to create catalog "{1}"'.format(__name__.upper(), result_dir))
        return (False,) * 3
    if not os.path.exists(source_dir):
        logging.error('{0}: Unable to find catalog "{1}"'.format(__name__.upper(), source_dir))
        return (False,) * 3
    else:
        f = os.listdir(source_dir)
        patt = "^.*\.{0}$".format(file_type)
        fl = [i for i in f if re_search(patt, i.lower())]
        if not fl:
            logging.error('{0}: No {1}-files in catalog "{2}"'.format(__name__.upper(), file_type, source_dir))
            return (False,) * 3
        else:
            return (fl, source_dir, result_dir)
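A usage sketch with made-up directory names; the function returns either (False, False, False) or the file list plus both resolved directories:

files, source_dir, result_dir = _check_and_prepare(source='in', result='out', file_type='xlsx')
if files:
    print('{0} xlsx files found in {1}'.format(len(files), source_dir))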
Example #24
	def IPaddrFinished(self, result, retval, extra_args):
		#def calc_netmask(self,nmask):
		#	from struct import pack, unpack
		#	from socket import inet_ntoa, inet_aton
		#	mask = 1L<<31
		#	xnet = (1L<<32)-1
		#	cidr_range = range(0, 32)
		#	cidr = long(nmask)
		#	if cidr not in cidr_range:
		#		print 'cidr invalid: %d' % cidr
		#		return None
		#	else:
		#		nm = ((1L<<cidr)-1)<<(32-cidr)
		#		netmask = str(inet_ntoa(pack('>L', nm)))
		#		return netmask

		iface = extra_args
		globalIPpattern = re_compile("scope global")
		ipRegexp = '[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}'
		#netRegexp = '[0-9]{1,2}'
		ipLinePattern = re_compile('inet ' + ipRegexp)
		peerLinePattern = re_compile('peer ' + ipRegexp + '/')
		ipPattern = re_compile(ipRegexp)
		#netmaskLinePattern = re_compile('/' + netRegexp)
		#netmaskPattern = re_compile(netRegexp)
		
		for line in result.splitlines():
			split = line.strip().split(' ',2)
			if (split[1] == iface):
				if re_search(globalIPpattern, split[2]):
					ip = self.regExpMatch(ipPattern, self.regExpMatch(ipLinePattern, split[2]))
					peer = self.regExpMatch(ipPattern, self.regExpMatch(peerLinePattern, split[2]))
					#netmask = calc_netmask(self.regExpMatch(netmaskPattern, self.regExpMatch(netmaskLinePattern, split[2])))
					if ip is not None:
						self["localIPval"].setText(ip)
					if peer is not None:
						self["remoteIPval"].setText(peer)
Example #25
def delete_comment_page(request, boardname, bid, cid):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  page = int(request.GET.get('page', 1))
  try:
    comment = Bulletin.comments.get(id=cid)
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 코멘트가'})
  try:
    bulletin = Bulletin.bulletIns.get(id=bid)
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당글이'})

  can_delete = not((datetime.now() - comment.created).days >= settings.CAN_MODIFY_DAYS)
  if can_delete and comment.isMyBulletin(request.user):
    # Deleting a comment deducts POINT_COMMENT points.
    reducePoint(request, comment)
    comment.delete()    # Delete the comment here.
    bulletin.commentCnt -= 1
    bulletin.save()
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
  else:
    # This is the case where deletion is not allowed.
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
Example #26
def get_board(boardname):
  # If the board name consists of two digits, it is a class-year (student ID) board.
  if re_search("\d\d", boardname):
    board, created = Board.objects.get_or_create(name=boardname, title=u'%s학번' % boardname, desc=u'%s학번 게시판 입니다.' % boardname, secret=True)
    if created:
      for category in categories['freeboard']:
        Category.objects.create(board=board, title=category)  # same categories as the free board
    return board
  if boardname in specialBoards:    # if this is a special board
    boardInfo = specialBoards[boardname]  # taken from specialBoards
    if boardInfo['real']:  # a real board: create it
      return Board.objects.get_or_create(name=boardname, title=boardInfo['title'], desc=boardInfo['desc'])[0]
    else:          # a fake one: just use the info without persisting
      return Board(name=boardname, title=boardInfo['title'], desc=boardInfo['desc'])
  # regular board
  try:
    board = Board.objects.get(name=boardname)
  except ObjectDoesNotExist:
    if boardname in basicBoards:    # Is it among the basic boards? If so, create it.
      boardInfo = basicBoards[boardname]
      board = Board.objects.create(name=boardname, title=boardInfo['title'], desc=boardInfo['desc'])
      if boardname == u'subjects':    # the subjects board is special
        for subject in subjects:
          if subject[0] == 5:
            Category.objects.create(board=board, title=u'학년무관 - %s'%subject[2])
          elif subject[1] == 3:
            Category.objects.create(board=board, title=u'%d학년 1, 2학기 - %s'%(subject[0], subject[2]))
          else:
            Category.objects.create(board=board, title=u'%d학년 %d학기 - %s'%subject)
      else:
        if boardname in categories:
          for category in categories[boardname]:
            Category.objects.create(board=board, title=category)
    else:
      board = None
  return board
Example #28
def read_page(request, boardname, bid):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  board = get_board(boardname)

  try:
    if not board.group_board.members.filter(id=request.user.id).exists():
      return render_to_response('noExist.html',{'user':request.user, 'target':'해당 소모임게시판에 접근할 권한이'})
  except ObjectDoesNotExist:
    pass

  if board is None:
    return render_to_response('noExist.html',{'user':request.user, 'target':'게시판이'})
  page = int(request.GET.get('page', 1))    # page number

  modify_days = settings.CAN_MODIFY_DAYS

  if "gotoIndex" in request.GET:
    gotoIndex = request.GET["gotoIndex"]
    if not int(gotoIndex):
      gotoIndex = ""
  else:
    gotoIndex = ""

  # Load the current post.
  try:
    bulletin = Bulletin.objects.get(id=bid)
    #bulletin.hits += 1       # bump the hit count by 1
    #bulletin.save()
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당글이'})  # if the post does not exist, say so!

  if request.method == 'POST' and not bulletin.deleted:
    form = CommentForm(data=request.POST, board=board, bulletin=bulletin, user=request.user)
    if form.is_valid():
      form.cleaned_data['content'] = utils.stripXSS(form.cleaned_data['content'])    # Prevent XSS
      form.cleaned_data['content'] = utils.CompleteHTML(form.cleaned_data['content']) # complete HTML
      # Write the comment
      # Bump the comment count on the post
      bulletin.commentCnt += 1
      bulletin.save()
      comment = Bulletin.comments.create(
        rate = form.cleaned_data['starpoint'],
        writer=request.user,
        parent=bulletin,
        #writerIP=request.META['REMOTE_ADDR'][-7:],    # store only the last seven characters of the IP.
        writerIP=request.META['REMOTE_ADDR'],      # store the full IP.
        isHiddenUser = form.cleaned_data['nametype'],        # store the anonymity flag
        board=bulletin.board,    # board
        title='comment',                # title
        content=form.cleaned_data['content'],      # content
        #secret=form.cleaned_data['secret'])        # secret flag
        secret=False)
      # Writing a comment earns settings.POINT_COMMENT points
      plusPoint(request, comment)
      #Rate.objects.create(
      #  rate = form.cleaned_data['starpoint'],
      #  bulletin = comment
      #)
      # Prefix '-' if the comment is anonymous, '+' otherwise.
      if comment.isHiddenUser:
        anomChar = '-'
      else:
        anomChar = '+'

      striped_content = strip_tags(comment.content)
      if len(striped_content) > 19:
        sum_content = '%s...'%striped_content[0:16]
      else:
        sum_content = striped_content[0:19]
      # Attach a Feed.
      if bulletin.writer != comment.writer:
        Feed.objects.create(
          url="/board/%s/read/%d/?page=%d&to=%s"%(boardname, int(bid), page, comment.id),
          from_user=request.user,
          to_user=bulletin.writer,
          additional='%s%s'%(anomChar, sum_content),
          type=u'C',
        )
      distinct_comment_user = map(lambda a:User.objects.get(id=a), set(map(lambda a:a['writer'], bulletin.my_comments.values("writer"))))
      for comment_user in distinct_comment_user:
        if comment_user != comment.writer and comment_user != bulletin.writer:
          Feed.objects.create(
            url="/board/%s/read/%d/?page=%d&to=%s"%(boardname, int(bid), page, comment.id),
            from_user=request.user,
            to_user=comment_user,
            additional='%s%s'%(anomChar, sum_content),
            type=u'C',
          )
      # Attach a Feed for everyone who scrapped this post
      if not comment.secret:
        for like in bulletin.likes.all():
          user = like.user
          url="/board/%s/read/%d/?to=%s"%(boardname, int(bid), comment.id)
          if user != request.user and not Feed.objects.filter(Q(url=url)&Q(to_user=user)).exists():
            Feed.objects.create(
              url=url,
              from_user=request.user,
              to_user=user,
              additional='%s%s'%(anomChar, sum_content),
              type=u'SR',
            )
      try:
        if board.group_board:
          # Refresh the group board status (new)
          board.group_board.save()
      except ObjectDoesNotExist:
        pass
      bulletin.save()    # refresh the post status too.
      board.save()  # refresh the board status as well
      return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
  else:
    form = CommentForm(board=board, bulletin=bulletin, user=request.user)

  if not bulletin.hasAuthToRead(request.user):
    bulletin.title = u'권한이 없습니다.'
    bulletin.content = u'권한이 없습니다.'
    bulletin.canRead = False
    commentList = []
  else:
    commentList = Bulletin.comments.filter(parent=bulletin).order_by('created')

  # If the post was deleted, blank out the title and content before sending.
  if bulletin.deleted:
    bulletin.title='삭제된 글입니다.'
    bulletin.content='삭제된 글입니다.'


  if bulletin.gallery and (board.name == "photo" or board.name == "all"):
    bulletin.photos = bulletin.gallery.public()
    for photo in bulletin.photos:
      photo.tags = PhotoTag.objects.filter(photo=photo)

  for comment in commentList:
    #isRated = comment.isMyComment
    if not comment.hasAuthToRead(request.user):
      #comment.writer = None
      comment.title = u'권한이 없습니다.'
      comment.content = u'권한이 없습니다.'
    comment.can_modify = not((datetime.now() - comment.created).days >= modify_days)
    comment.is_my_comment = comment.isMyComment(request.user)

  isRated = False
  rateOrder = 1
  rate = 0
  rate_writer = 1

  # Handle comments posted anonymously
  commentHiddenUserList = []
  commentHiddenUserNumber = 1
  for comment in commentList:
    if comment.isHiddenUser:
      if comment.writer == bulletin.writer and bulletin.isHiddenUser:
          comment.hiddenUser = "******"
      else:
        if comment.writer in commentHiddenUserList:
          comment.hiddenUser = "******" % (commentHiddenUserList.index(comment.writer) + 1)
        else:
          commentHiddenUserList.append(comment.writer)
          comment.hiddenUser = "******" % commentHiddenUserNumber
          commentHiddenUserNumber += 1


  try:
    relatedPosition = RelatedPosition.objects.get(bulletin=bulletin)
  except ObjectDoesNotExist:
    relatedPosition = None

  relatedFile = RelatedFile.objects.filter(bulletin=bulletin)
  image_files = []
  other_files = []
  for file in relatedFile:
    file.name = file.file.name.split('/')[-1]
    if file.isImage():
      image_files.append(file)
    else:
      other_files.append(file)

  can_modify = not((datetime.now() - bulletin.created).days >= modify_days)
  tpl = loader.get_template('board/read.html')    # Render using read.html as the template.
  ctx = RequestContext(request, {            # Parameters can be passed as a dictionary.
    'form':form,
    'board':board,
    'bulletin':bulletin,
    'can_modify':can_modify,
    'modify_days':modify_days,
    'page':page,
    'naver_map_key':settings.NAVER_MAP_KEY,
    'scraped':Scrap.objects.filter(user=request.user, bulletin=bulletin).exists(),  # did the user scrap this?
    'liked':Like.objects.filter(user=request.user, bulletin=bulletin).exists(),  # did the user like this?
    'like_list':Like.objects.filter(bulletin=bulletin),
    'scraps':Scrap.objects.filter(bulletin=bulletin),    # scraps of this post
    'gotoIndex':gotoIndex,
    'commentList':commentList,
    'isMyBulletin':bulletin.isMyBulletin(request.user),
    'relatedPosition':relatedPosition,    # related position
    'files':other_files,
    'image_files':image_files,
    'boardname':boardname,
    'rate':rate,
    'isRated':isRated,
    'rateNumber':rateOrder-1,
    'rate_writer':rate_writer,
    'toComment':request.GET.get('to', 0)
    })
  return HttpResponse(tpl.render(ctx))
Example #29
def write_page(request, boardname):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  board = get_board(boardname)
  try:
    if not board.group_board.members.filter(id=request.user.id).exists():
      return render_to_response('noExist.html',{'user':request.user, 'target':'해당 소모임게시판에 접근할 권한이'})
  except ObjectDoesNotExist:
    pass
  if board is None:
    return render_to_response('noExist.html',{'user':request.user, 'target':'게시판이'})
  if request.method == 'POST':
    form = WriteAndModifyForm(data=request.POST, files=request.FILES, board=board)
    if form.is_valid():
      form.cleaned_data['content'] = utils.stripXSS(form.cleaned_data['content'])     # Prevent XSS
      form.cleaned_data['content'] = utils.CompleteHTML(form.cleaned_data['content']) # complete HTML
      bulletin = Bulletin.bulletIns.create(
        writer=request.user,              # writer
        #writerIP=request.META['REMOTE_ADDR'][-7:],    # store only the last seven characters of the IP.
        writerIP=request.META['REMOTE_ADDR'],      # store the full IP.
        isHiddenUser = form.cleaned_data['nametype'],  # anonymous user flag
        board=Board.objects.get(name=boardname),    # board name
        title=form.cleaned_data['title'],        # title
        content=form.cleaned_data['content'],      # content
        #secret=form.cleaned_data['secret'],        # secret flag
        notice=form.cleaned_data['notice'],
        #category=form.cleaned_data['category'],
      )
      if form.cleaned_data['position']:    # if position info was provided
        RelatedPosition.objects.create(
          bulletin=bulletin,
          title=form.cleaned_data['positionTitle'],
          position=form.cleaned_data['position'],
        )
      if form.cleaned_data['file']:  # attached files
        for file in form.cleaned_data['file']:
          RelatedFile.objects.create(
            board=board,
            bulletin=bulletin,
            file=file,
            size=file.size,
          )
      if len(bulletin.title) > 20:
        additionalTitle = '%s...' % bulletin.title[:17]
      else:
        additionalTitle = bulletin.title[:20]
      if form.cleaned_data['gallery'] and board.name == "photo":    # if a gallery was uploaded
        # Create a gallery and upload into it.
        bulletin.gallery = GalleryUpload(owner=request.user,
            zip_file=form.cleaned_data['gallery'],
            title=' >> '.join([board.title, form.cleaned_data['title']]),
            #is_public=not form.cleaned_data['gallery_is_not_public']).save()
            is_public=True).save()
        bulletin.save()    # save
        # Add a Feed for every user
        for user in User.objects.all():
          if bulletin.writer != user:
            Feed.objects.create(
              url="/board/%s/read/%s/"%(boardname, bulletin.id),
              from_user=bulletin.writer,
              to_user=user,
              additional=additionalTitle,
              type=u'IN',
            )
      # Writing a post bumps the user's written-post count by one
      plusPoint(request, bulletin)
      # For notices, attach a Feed for every user.
      if boardname == 'notice':
        for user in User.objects.all():
          if request.user != user:
            Feed.objects.create(
              url="/board/%s/read/%s/"%(boardname, bulletin.id),
              from_user=request.user,
              to_user=user,
              additional=additionalTitle,
              type=u'N',
            )
      try:
        if board.group_board:
          # Attach a Feed for all members of the group
          for user in board.group_board.members.all():
            if user != request.user:
              Feed.objects.create(
                url="/board/%s/read/%s/"%(boardname, bulletin.id),
                from_user=request.user,
                to_user=user,
                additional=board.group_board.id,
                type=u'GB',
              )
          # Refresh the group board status (new)
          board.group_board.save()
      except ObjectDoesNotExist:
        pass
      board.save()  # refresh the board status as well
      return HttpResponseRedirect(reverse('board-list', args=[boardname]))
  else:
    form = WriteAndModifyForm(board=board)

  tpl = loader.get_template('board/write_and_modify.html')    # Render using write_and_modify.html as the template.
  ctx = RequestContext(request, {            # Parameters can be passed as a dictionary.
    'board':board,
    'form':form,
    'type':'write',
    'typeText':u'작성',
    'boardname':boardname,
    'MAX_A_FILE_SIZE':settings.MAX_A_FILE_SIZE,
    'MAX_TOTAL_FILE_SIZE':settings.MAX_TOTAL_FILE_SIZE,
    })
  return HttpResponse(tpl.render(ctx))
Example #30
def domain_from_link(link: str) -> Optional[str]:
    m = re_search(r"(?<=:\/\/)?(([A-Z]|[a-z]|[0-9])+\.)+([A-Z]|[a-z]|[0-9])+", link)
    return m.group() if m else None  # return None instead of raising when nothing matches
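Quick checks of the behaviour after the None guard:

print(domain_from_link('https://docs.python.org/3/'))  # docs.python.org
print(domain_from_link('no dots here'))                # None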
Example #31
    platf_depend_exit()
# end if

import os
from re import search as re_search
from getopt import gnu_getopt, GetoptError

try:
    opts, args = gnu_getopt(sys.argv[1:], "hvq:",
                            ["help", "version", "query="])
except GetoptError as gerr:
    print(str(gerr))
    platf_depend_exit(2)
# end try

is_fasta = lambda file: re_search(r"\.fa(sta)?(\.gz)?$", file) is not None
query_fpath = None

# Get path to query file from argv
for opt, arg in opts:

    if opt in ("-q", "--query"):

        if not os.path.exists(arg):
            print("File '{}' does not exist!".format(arg))
            platf_depend_exit(1)
        # end if

        if not is_fasta(arg):
            print("File '{}' is probably not fasta file.".format(arg))
            print(
Example #32
def cmd_depot_download(args):
    pbar = fake_tqdm()
    pbar2 = fake_tqdm()

    try:
        with init_clients(args) as (s, cdn, manifests):
            # calculate total size
            if not args.no_progress or args.name or args.regex:
                total_files = 0
                total_size = 0

                for manifest in manifests:
                    for depotfile in manifest:
                        if not depotfile.is_file:
                            continue
                        if args.name and not fnmatch(depotfile.filename, args.name):
                            continue
                        if args.regex and not re_search(args.regex, depotfile.filename):
                            continue

                        total_files += 1
                        total_size += depotfile.size
            else:
                total_files = sum(map(lambda x: len(x), manifests))
                total_size = sum(map(lambda x: x.size_original, manifests))

            # enable progress bar
            if not args.no_progress and sys.stderr.isatty():
                pbar = tqdm(desc='Downloaded', mininterval=0.5, maxinterval=1, total=total_size, unit=' B', unit_scale=True)
                pbar2 = tqdm(desc='Files     ', mininterval=0.5, maxinterval=1, total=total_files, position=1, unit=' file', unit_scale=False)
                gevent.spawn(pbar.gevent_refresh_loop)
                gevent.spawn(pbar2.gevent_refresh_loop)

            # download files
            tasks = GPool(4)

            for manifest in manifests:
                LOG.info("Processing (%s) '%s' ..." % (manifest.gid, manifest.name))

                for depotfile in manifest:
                    if not depotfile.is_file:
                        continue

                    # filepath filtering
                    if args.name and not fnmatch(depotfile.filename, args.name):
                        continue
                    if args.regex and not re_search(args.regex, depotfile.filename):
                        continue

                    tasks.spawn(depotfile.download_to, args.output,
                                no_make_dirs=args.no_directories,
                                pbar=pbar)

                    pbar2.update(1)

            # wait on all downloads to finish
            tasks.join()
            gevent.sleep(0.5)
    except KeyboardInterrupt:
        pbar.close()
        LOG.info("Download canceled")
        return 1  # error
    except SteamError as exp:
        pbar.close()
        pbar.write(str(exp))
        return 1  # error
    else:
        pbar.close()
        if not args.no_progress:
            pbar2.close()
            pbar2.write('\n')
        LOG.info('Download complete')
Example #33
def build_topo_from_json(tgen, topo):
    """
    Reads configuration from JSON file. Adds routers, creates interface
    names dynamically and link routers as defined in JSON to create
    topology. Assigns IPs dynamically to all interfaces of each router.

    * `tgen`: Topogen object
    * `topo`: json file data
    """

    ROUTER_LIST = sorted(topo["routers"].keys(),
                         key=lambda x: int(re_search("\d+", x).group(0)))

    # listRouters starts as a copy of ROUTER_LIST; appending each router again
    # would duplicate every entry, so routers are only registered with tgen here.
    listRouters = ROUTER_LIST[:]
    for routerN in ROUTER_LIST:
        logger.info("Topo: Add router {}".format(routerN))
        tgen.add_router(routerN)

    if "ipv4base" in topo:
        ipv4Next = ipaddr.IPv4Address(topo["link_ip_start"]["ipv4"])
        ipv4Step = 2**(32 - topo["link_ip_start"]["v4mask"])
        if topo["link_ip_start"]["v4mask"] < 32:
            ipv4Next += 1
    if "ipv6base" in topo:
        ipv6Next = ipaddr.IPv6Address(topo["link_ip_start"]["ipv6"])
        ipv6Step = 2**(128 - topo["link_ip_start"]["v6mask"])
        if topo["link_ip_start"]["v6mask"] < 127:
            ipv6Next += 1
    for router in listRouters:
        topo["routers"][router]["nextIfname"] = 0

    while listRouters != []:
        curRouter = listRouters.pop(0)
        # Physical Interfaces
        if "links" in topo["routers"][curRouter]:

            def link_sort(x):
                if x == "lo":
                    return 0
                elif "link" in x:
                    return int(x.split("-link")[1])
                else:
                    return int(re_search("\d+", x).group(0))

            for destRouterLink, data in sorted(
                    topo["routers"][curRouter]["links"].iteritems(),
                    key=lambda x: link_sort(x[0]),
            ):
                currRouter_lo_json = topo["routers"][curRouter]["links"][
                    destRouterLink]
                # Loopback interfaces
                if "type" in data and data["type"] == "loopback":
                    if ("ipv4" in currRouter_lo_json
                            and currRouter_lo_json["ipv4"] == "auto"):
                        currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format(
                            topo["lo_prefix"]["ipv4"],
                            number_to_row(curRouter),
                            number_to_column(curRouter),
                            topo["lo_prefix"]["v4mask"],
                        )
                    if ("ipv6" in currRouter_lo_json
                            and currRouter_lo_json["ipv6"] == "auto"):
                        currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format(
                            topo["lo_prefix"]["ipv6"],
                            number_to_row(curRouter),
                            number_to_column(curRouter),
                            topo["lo_prefix"]["v6mask"],
                        )

                if "-" in destRouterLink:
                    # Spliting and storing destRouterLink data in tempList
                    tempList = destRouterLink.split("-")

                    # destRouter
                    destRouter = tempList.pop(0)

                    # Current Router Link
                    tempList.insert(0, curRouter)
                    curRouterLink = "-".join(tempList)
                else:
                    destRouter = destRouterLink
                    curRouterLink = curRouter

                if destRouter in listRouters:
                    currRouter_link_json = topo["routers"][curRouter]["links"][
                        destRouterLink]
                    destRouter_link_json = topo["routers"][destRouter][
                        "links"][curRouterLink]

                    # Assigning name to interfaces
                    currRouter_link_json["interface"] = "{}-{}-eth{}".format(
                        curRouter, destRouter,
                        topo["routers"][curRouter]["nextIfname"])
                    destRouter_link_json["interface"] = "{}-{}-eth{}".format(
                        destRouter, curRouter,
                        topo["routers"][destRouter]["nextIfname"])

                    topo["routers"][curRouter]["nextIfname"] += 1
                    topo["routers"][destRouter]["nextIfname"] += 1

                    # Linking routers to each other as defined in JSON file
                    tgen.gears[curRouter].add_link(
                        tgen.gears[destRouter],
                        topo["routers"][curRouter]["links"][destRouterLink]
                        ["interface"],
                        topo["routers"][destRouter]["links"][curRouterLink]
                        ["interface"],
                    )

                    # IPv4
                    if "ipv4" in currRouter_link_json:
                        if currRouter_link_json["ipv4"] == "auto":
                            currRouter_link_json["ipv4"] = "{}/{}".format(
                                ipv4Next, topo["link_ip_start"]["v4mask"])
                            destRouter_link_json["ipv4"] = "{}/{}".format(
                                ipv4Next + 1, topo["link_ip_start"]["v4mask"])
                            ipv4Next += ipv4Step
                    # IPv6
                    if "ipv6" in currRouter_link_json:
                        if currRouter_link_json["ipv6"] == "auto":
                            currRouter_link_json["ipv6"] = "{}/{}".format(
                                ipv6Next, topo["link_ip_start"]["v6mask"])
                            destRouter_link_json["ipv6"] = "{}/{}".format(
                                ipv6Next + 1, topo["link_ip_start"]["v6mask"])
                            ipv6Next = ipaddr.IPv6Address(
                                int(ipv6Next) + ipv6Step)

            logger.debug(
                "Generated link data for router: %s\n%s",
                curRouter,
                json_dumps(topo["routers"][curRouter]["links"],
                           indent=4,
                           sort_keys=True),
            )
Example #34
    def regression_cv(self, cv_path='tie_strengths/cv_config.yaml'):
        """
        Performs CV at different levels of overlap
        """
        try:
            conf = yaml.load(open(cv_path))
        except:
            self.paths['cv_path'] = os.path.join(self.run_path,
                                                 'cv_config.yaml')
            conf = yaml.load(open(self.paths['cv_path']))
        params = self.get_variable_transformations(conf['params'])
        cols_pttrns = params.keys()

        try:  #TODO: change this (for db)
            self.paths['full_df']
        except:
            self.paths['full_df'] = os.path.join(self.run_path, 'full_df.txt')

        df = pd.read_table(self.paths['full_df'], sep=' ')
        df = df[df.c_wkn_t > 2]

        print('Table Read \n')
        cols_dic = self.get_cols_dic(cols_pttrns,
                                     df.columns)  # link cols with patterns

        # TODO: add this to a diff function, it's different preprocessing
        pttrn = r'_wk(n|l)_(\d+|t|l)'
        df_nas = {col: 0. for col in df.columns if re_search(pttrn, col)}

        df = df.fillna(value=df_nas)
        print('NAs filled\n')
        wkn_cols = [
            n for n, col in enumerate(df.columns)
            if re_search(r'c_wkn_\d+', col)
        ]
        wkl_cols = [
            n for n, col in enumerate(df.columns)
            if re_search(r'c_wkl_\d+', col)
        ]
        wks_cols = [
            n for n, col in enumerate(df.columns)
            if re_search(r's_wkn_\d+', col)
        ]

        # TODO: check if its faster to apply diff function
        df.loc[:, 'prop_len'] = get_prop_len(df['c_wkl_l'], df['deg_0'],
                                             df['deg_1'], df['n_len_0'],
                                             df['n_len_1'])

        #df.loc[:, 'c_l_dist'] = df.apply(lambda x: np.dot(x[wkn_cols], x[wkl_cols]), axis=1)
        print('First Variable\n')
        del df['c_wkn_0']
        del df['c_wkl_0']
        #del df['s_wkn_0']
        del df['1']
        del df['n_ij']
        del df['deg_0']
        del df['deg_1']
        # These columns are not always present, so drop them conditionally
        # instead of silencing KeyError with bare excepts.
        for col in ('0', '0_1', '1_1', '0_0'):
            if col in df.columns:
                del df[col]

        df.dropna(inplace=True)
        self.paths['cv_class_stats'] = os.path.join(self.run_path,
                                                    'cv_class_det0_stats.csv')
        w = open(self.paths['cv_class_stats'], 'w')  # text mode: we write str, not bytes
        w.write(' '.join([
            'alpha', 'num_1', 'num_1_pred', 'accuracy', 'f1', 'matthews',
            'precision', 'recall'
        ]) + '\n')
        w.close()
        y = df['ovrl']
        del df['ovrl']
        print("Obtaining models\n")
        alphas = [0.0, 0.001, 0.002, 0.004, 0.005, 0.01, 0.015] + list(
            np.arange(0.02, 0.1, .01)) + list(np.arange(0.1, .5, .05)) + list(
                np.arange(.5, .9, 0.1)) + list(np.arange(.09, 1, .01))
        for alpha in alphas:
            y_c = y.apply(lambda x: self._ifelse(x <= alpha, 1, 0))
            x_train, x_test, y_train, y_test = train_test_split(df,
                                                                y_c,
                                                                test_size=0.5)
            rf = RandomForestClassifier()
            rf.fit(x_train, y_train)
            y_pred = rf.predict(x_test)
            ac = accuracy_score(y_test, y_pred)
            f1 = f1_score(y_test, y_pred)
            mth = matthews_corrcoef(y_test, y_pred)
            prc = precision_score(y_test, y_pred)
            rec = recall_score(y_test, y_pred)
            self.write_class_results(alpha, sum(y_c), sum(y_pred), ac, f1, mth,
                                     prc, rec)
            print(str(alpha) + '\n')
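
Before each fit, the loop binarizes the continuous overlap ovrl at the current alpha threshold. A minimal equivalent of that step (values illustrative):

import pandas as pd

y = pd.Series([0.02, 0.40, 0.75])
alpha = 0.1
y_c = (y <= alpha).astype(int)
print(list(y_c))  # [1, 0, 0]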
Example #35
def downloadYT(name, url, skip, delay):
    if "t.co" in url:
        url = req_get(url).url

    if (j := re_search(r"[a-zA-Z_\-0-9]{11,}", url)):
        j = "https://youtu.be/" + j.group(0)
Example #36
def _check_url(url):
    # check and sanitize URL
    url = url.strip("/")
    if re_search(r"\W", url) is not None:
        raise InvalidURLError("Only alpha-numeric characters accepted in URL.")
    return url
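
Usage sketch, assuming InvalidURLError is an exception class defined elsewhere in the project:

print(_check_url("blog2024/"))  # -> blog2024
# _check_url("blog/2024")       # raises InvalidURLError: '/' is a non-word character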
Example #37
# Firstly check for information-providing flags

if "-h" in sys.argv[1:] or "--help" in sys.argv[1:]:
    print_help()
    platf_depend_exit()
# end if

if "-v" in sys.argv[1:] or "--version" in sys.argv[1:]:
    print(__version__)
    platf_depend_exit()
# end if

import os
from re import search as re_search

is_fastq = lambda f: re_search(r".*\.f(ast)?q(\.gz)?$", f) is not None
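
# For instance, this predicate accepts plain and gzipped FASTQ names
# (illustrative values, not from the original script):
#   is_fastq("reads.fastq")  -> True
#   is_fastq("reads.fq.gz")  -> True
#   is_fastq("reads.fasta")  -> False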

fpaths = list()

valid_options = ("-h", "--help", "-v", "--version")

for arg in sys.argv[1:]:

    if arg not in valid_options:

        if not os.path.exists(arg):
            print("File '{}' does not exist!".format(arg))
            platf_depend_exit(1)
        # end if

        if not is_fastq(arg):
Example #38
    def play_media(self,url):
        wait = WebDriverWait(self.driver, 30)
        action_chains = ActionChains(self.driver)

        self.driver.get(url)
        # Click the video tab to make sure we land on the video page (if problems increase, switch to bs4 or re parsing)
        media_tag = ['@title="视频"', '@title="视频 "', '@title="微课"', 'last()-1']
        try:
            wait.until(EC.presence_of_element_located(
                (By.XPATH, '//div[@class="left"]/div/div[@class="main"]/div[@class="tabtags"]')))
            for tag in media_tag:
                try:
                    bt = self.driver.find_element_by_xpath(
                        '//div[@class="left"]/div/div[@class="main"]/div[@class="tabtags"]/span['+tag+']')
                    break
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:
                    pass
            sleep(5)
            self.driver.execute_script("arguments[0].scrollIntoView();arguments[0].click();", bt)
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            pass
        
        try:
            wait.until(EC.presence_of_element_located((By.XPATH,'//div[@class="switchbtn"]')))
            switch_btn=self.driver.find_element_by_xpath('//div[@class="switchbtn"]')
            action_chains.move_to_element(switch_btn)
            switch_btn.click()
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            pass

        try:
            wait.until(EC.presence_of_element_located((By.XPATH, '//iframe')))
            iframe = self.driver.find_element_by_xpath('//iframe')
            self.driver.switch_to.frame(iframe)
            sleep(1)
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            print(COLOR.NOTE, ' no videos,continue~', COLOR.END, file=self._out_fp)
            #log_fp.write(' no videos,continue~\n')
            return

        # Handle multiple videos
        try:
            video_num = self.driver.execute_script(
                "window.scrollTo(0,document.body.scrollHeight);return document.getElementsByClassName('ans-job-icon').length")
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            video_num = self.driver.execute_script(
                "return document.getElementsByClassName('ans-job-icon').length")
        try:
            self.driver.execute_script("window.scrollTo(0,0)")
        except KeyboardInterrupt:
            raise KeyboardInterrupt
        except:
            pass

        wait.until(EC.presence_of_all_elements_located((By.XPATH, '//div[@class="ans-cc"]')))
        ans_cc = self.driver.find_element_by_xpath('//div[@class="ans-cc"]')
        for i in range(3):
            try:
                h5_text = ans_cc.get_attribute('innerHTML')
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except:
                sleep(1)
        video_road = PlayMedia.get_road(h5_text, video_num)  # bs4处理得到各个视频路径

        print(COLOR.DISPLAY, ' there are ' + str(video_num) + ' media in this section:', COLOR.END, file=self._out_fp)
        #log_fp.write(' there are ' + str(video_num) + ' videos in this section:\n')

        # Start playing all the videos
        first_road = '//div[@class="ans-cc"]'

        for v_num in range(1, video_num + 1):
            # log
            #self.driver.refresh()
            self.driver.switch_to.default_content()
            for i in range(5):
                try:
                    wait.until(EC.presence_of_element_located((By.XPATH, '//iframe')))
                    iframe = self.driver.find_element_by_xpath('//iframe')
                    self.driver.switch_to.frame(iframe)
                    break
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:
                    sleep(i+0.5)

            print(COLOR.DISPLAY, ' go ' + str(v_num) + ':', COLOR.END, file=self._out_fp)
            #log_fp.write(' go ' + str(v_num) + ':\n')
            # Drag the scrollbar
            #self.driver.execute_script("window.scrollTo(0,arguments[0])", 400 + 700 * (v_num - 1))
            sleep(2)

            goal_road = first_road + video_road[v_num - 1]
            # Check for a task-point icon and whether that task point is already finished
            try:
                flag = self.driver.find_element_by_xpath(goal_road)
                self.driver.execute_script(
                    '''var goal=document.evaluate(arguments[0],document).iterateNext();goal.scrollIntoView();''', goal_road)
                sleep(5)
                icon_flag = 1
                nowflag = flag.get_attribute('class')
                if 'finished' in nowflag:
                    print(COLOR.OK + ' Well! the video is already finished! continue~' + COLOR.END, file=self._out_fp)
                    #log_fp.write(' Well! the video is already finished! continue~' + '\n')
                    self._end = 0
                    # If this video task is already finished, move on to the next one
                    continue
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except:
                #print(traceback.format_exc())
                icon_flag = 0

            #print(1)
            # try:
            iframe_flag = 0
            for i in range(10):
                try:
                    wait.until(EC.presence_of_element_located((By.XPATH, goal_road + '/iframe')))
                    iframe = self.driver.find_element_by_xpath(goal_road + '/iframe')
                    self.driver.switch_to.frame(iframe)
                    iframe_flag = 1
                    sleep(2)
                    break
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:
                    #print(traceback.format_exc())
                    # log
                    sleep(i+1)
            if iframe_flag == 0:
                print(COLOR.ERR+"  can't into the video,continue"+COLOR.END, file=self._out_fp)
                continue

            #print(2)

            try:
                ppt_num = eval(self.driver.execute_script("return document.getElementsByClassName('all')[0].innerText"))
                for i in range(0, ppt_num):
                    self.driver.execute_script("document.getElementsByClassName('mkeRbtn')[0].click()")
                    sleep(1)
                continue
            except KeyboardInterrupt:
                raise KeyboardInterrupt
            except:
                pass

            #print(2.5)

            # Start video playback via JavaScript
            play_ok = 0
            for i in range(3):
                try:
                    self.driver.execute_script(
                        """
                        var video=document.querySelector('video');video.scrollIntoView();video.play();
                        video.onmouseout=function(){return false;}
                        """)
                    play_ok = 1
                    sleep(2)
                    self.driver.execute_script("document.querySelector('video').autoplay=true;")
                    self.driver.execute_script("document.querySelector('video').play();")
                    self.driver.execute_script(
                        "document.querySelector('video').playbackRate=arguments[0];document.querySelector('video').defaultPlaybackRate=arguments[0]", PlayMedia.rate)
                    sleep(1)
                    #self.driver.execute_script("document.querySelector('video').load();")
                    break
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:
                    #print(format_exc())
                    send_err(format_exc())
                    sleep(i+1)
            #print(3)

            audio = 0
            if play_ok == 0:
                for i in range(3):
                    try:
                        self.driver.execute_script(
                            "var audio=document.querySelector('audio');audio.scrollIntoView();audio.play();audio.onmouseout=function(){return false;}")
                        play_ok = 1
                        audio = 1
                        self.driver.execute_script("document.querySelector('audio').autoplay=true;")
                        self.driver.execute_script(
                            "document.querySelector('audio').playbackRate=arguments[0];document.querySelector('audio').defaultPlaybackRate=arguments[0]", PlayMedia.rate)
                        #self.driver.execute_script("document.querySelector('audio').load();")
                        break
                    except KeyboardInterrupt:
                        raise KeyboardInterrupt
                    except:
                        sleep(i+1)
            if audio == 1:
                media_type = 'audio'
            else:
                media_type = 'video'

            #print(media_type)

            if play_ok == 0:
                # Playback did not start
                self.driver.switch_to.parent_frame()
                print(COLOR.DISPLAY+' this is not a media, go ahead!'+COLOR.END, file=self._out_fp)
                #log_fp.write(" this is not a video, go ahead!\n")
                continue
            else:
                # Turn on speed-up & fetch timing info
                sleep(2)
                for i in range(5):
                    total_tm = self.driver.execute_script(
                        "return document.querySelector(arguments[0]).duration", media_type)
                    #print(total_tm)
                    now_tm = self.driver.execute_script(
                        "return document.querySelector(arguments[0]).currentTime", media_type)
                    #print(now_tm)
                    self.driver.execute_script("document.querySelector(arguments[0]).play();", media_type)
                    if total_tm != None and now_tm != None:
                        break
                    else:
                        sleep(i+1)
                total_tm = int(total_tm)
                now_tm = int(now_tm)
                need_tm = total_tm-now_tm
                print("   now_tm:", now_tm, '\t', "total_tm:", total_tm, '\t', "need_tm:", need_tm, file=self._out_fp)

            #print(4)

            real_time = 0
            while 1:
                real_time += 10
                try:
                    now_tm = self.driver.execute_script(
                        "return document.querySelector(arguments[0]).currentTime", media_type)
                    need_tm = total_tm-int(now_tm)
                    self.driver.execute_script("document.querySelector(arguments[0]).play();", media_type)
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:
                    pass
                # Interactive progress display
                progress = (total_tm-need_tm)*100/total_tm
                print(COLOR.OK+"   progress:{0:.2f}%\trest:{1}         ".format(progress,
                                                                                need_tm)+COLOR.END, file=self._out_fp, end="\r")
                self._out_fp.flush()

                # With <5 min remaining, periodically check whether the task is already finished
                if (icon_flag == 1 and need_tm <= 300):
                    self.driver.switch_to.parent_frame()
                    flag = self.driver.find_element_by_xpath(goal_road)
                    nowflag = flag.get_attribute('class')
                    self.driver.switch_to.frame(self.driver.find_element_by_xpath(goal_road + '/iframe'))
                    if 'finished' in nowflag:
                        print(COLOR.OK, ' Well!the video is finished ahead of time! continue~', COLOR.END, file=self._out_fp)
                        #log_fp.write(' Well!the video is finished ahead of time! continue~' + '\n')
                        sleep(10)
                        break

                if need_tm <= 2 or real_time > (total_tm+100):
                    print(COLOR.OK, ' Well!the video is finished! continue~', COLOR.END, file=self._out_fp)
                    #log_fp.write(' Well!the video is finished! continue~' + '\n')
                    sleep(10)
                    break

                # Automatically detect and answer in-video quiz questions
                pre = 1  # default option index
                try:
                    uls = self.driver.find_element_by_xpath(
                        '//div[@class="x-container ans-timelineobjects x-container-default"]/span/div/div/ul')
                    que_type = self.driver.find_element_by_xpath(
                        '//div[@class="ans-videoquiz-title"]').get_attribute('textContent')
                    #log_fp.write('que_type:' + que_type + '\n')
                    que_type = re_search(r'[[]([\w\W]+?)[]]', que_type).group(1)
                    #log_fp.write('      monitor question,' + que_type + '\n')
                    if "多选题" in que_type:  # "multiple-choice question"
                        # print(uls.find_elements_by_xpath('//li[@class="ans-videoquiz-opt"]'))
                        opt_num = len(uls.find_elements_by_xpath('//li[@class="ans-videoquiz-opt"]'))  # number of options
                        #print(opt_num,file=self._out_fp)
                        for opt_i in range(2, opt_num + 1):  # number of options to select: 2, 3, 4, ...
                            fin_que = 1
                            for opt_j in range(1, opt_num - opt_i + 2):  # starting position
                                #print('      select:',file=self._out_fp)
                                for opt_k in range(0, opt_i):  # how many to click
                                    option = uls.find_element_by_xpath('//li[' + str(opt_j + opt_k) + ']/label/input')
                                    self.driver.execute_script("arguments[0].click();", option)
                                sleep(5)
                                bn = self.driver.find_element_by_xpath(
                                    '//div[@class="x-container ans-timelineobjects x-container-default"]/span/div/div/div[2]')
                                self.driver.execute_script("arguments[0].click();", bn)
                                try:
                                    self.driver.switch_to_alert().accept()
                                except:
                                    fin_que = 0
                                    break
                                try:
                                    while 1:  # a wrong multiple-choice answer can pop up more than one alert
                                        self.driver.switch_to_alert().accept()
                                except:
                                    sleep(0.5)

                                for opt_k in range(0, opt_i):  # how many to click
                                    option = uls.find_element_by_xpath('//li[' + str(opt_j + opt_k) + ']/label/input')
                                    self.driver.execute_script("arguments[0].click();", option)
                                sleep(5)
                                bn = self.driver.find_element_by_xpath(
                                    '//div[@class="x-container ans-timelineobjects x-container-default"]/span/div/div/div[2]')
                                self.driver.execute_script("arguments[0].click();", bn)
                                try:
                                    while 1:  # a wrong multiple-choice answer can pop up more than one alert
                                        self.driver.switch_to_alert().accept()
                                except:
                                    sleep(0.5)
                                sleep(0.5)

                            if fin_que == 0:
                                break
                        #log_fp.write('      solve the question\n')
                        sleep(10)
                    else:
                        while 1:
                            try:
                                option = uls.find_element_by_xpath('//li[' + str(pre) + ']/label/input')
                                self.driver.execute_script("arguments[0].click();", option)
                                # action_chains.move_to_element(option)
                                # option.click()
                                #log_fp.write('      select ' + chr(pre + 64) + '\n')
                                bn = self.driver.find_element_by_xpath(
                                    '//div[@class="x-container ans-timelineobjects x-container-default"]/span/div/div/div[2]')
                                self.driver.execute_script("arguments[0].click();", bn)
                                # action_chains.move_to_element(bn)
                                # bn.click()
                                # action_chains.click(bn)
                                try:
                                    while 1:
                                        self.driver.switch_to_alert().accept()
                                except KeyboardInterrupt:
                                    raise KeyboardInterrupt
                                except:
                                    sleep(0.3)
                                    pre += 1
                            except KeyboardInterrupt:
                                raise KeyboardInterrupt
                            except:
                                #log_fp.write('      solve the question\n')
                                sleep(10)
                                break
                except KeyboardInterrupt:
                    raise KeyboardInterrupt
                except:  # 10s延时
                    sleep(10)

            print(COLOR.OK+' finish the video                     '+COLOR.END, file=self._out_fp)
Example #39
    def wrapper(self, writer, key, *args, **kwargs):
        if re_search(writer.filter, key) is None:
            logging.getLogger(writer.scope).info(f'{writer} ignoring {key}')
            return

        return f(self, writer, key, *args, **kwargs)
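
For context, a sketch of how a guard like this is commonly attached as a decorator; the skip_filtered name and the writer attributes are assumptions for illustration, not part of the original snippet:

from functools import wraps
from re import search as re_search
import logging

def skip_filtered(f):
    # Decorator form of the guard above: drop keys that the writer's
    # regex filter does not match, logging the skip under the writer's scope.
    @wraps(f)
    def wrapper(self, writer, key, *args, **kwargs):
        if re_search(writer.filter, key) is None:
            logging.getLogger(writer.scope).info(f'{writer} ignoring {key}')
            return
        return f(self, writer, key, *args, **kwargs)
    return wrapper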
Example #40
File: topojson.py Project: stoza/frr
def build_topo_from_json(tgen, topo):
    """
    Reads configuration from JSON file. Adds routers, creates interface
    names dynamically and link routers as defined in JSON to create
    topology. Assigns IPs dynamically to all interfaces of each router.
    * `tgen`: Topogen object
    * `topo`: json file data
    """

    ROUTER_LIST = sorted(topo["routers"].keys(),
                         key=lambda x: int(re_search(r"\d+", x).group(0)))

    SWITCH_LIST = []
    if "switches" in topo:
        SWITCH_LIST = sorted(topo["switches"].keys(),
                             key=lambda x: int(re_search(r"\d+", x).group(0)))

    listRouters = sorted(ROUTER_LIST[:])
    listSwitches = sorted(SWITCH_LIST[:])
    listAllRouters = deepcopy(listRouters)
    dictSwitches = {}

    for routerN in ROUTER_LIST:
        logger.info("Topo: Add router {}".format(routerN))
        tgen.add_router(routerN)

    for switchN in SWITCH_LIST:
        logger.info("Topo: Add switch {}".format(switchN))
        dictSwitches[switchN] = tgen.add_switch(switchN)

    if "ipv4base" in topo:
        ipv4Next = ipaddress.IPv4Address(topo["link_ip_start"]["ipv4"])
        ipv4Step = 2**(32 - topo["link_ip_start"]["v4mask"])
        if topo["link_ip_start"]["v4mask"] < 32:
            ipv4Next += 1
    if "ipv6base" in topo:
        ipv6Next = ipaddress.IPv6Address(topo["link_ip_start"]["ipv6"])
        ipv6Step = 2**(128 - topo["link_ip_start"]["v6mask"])
        if topo["link_ip_start"]["v6mask"] < 127:
            ipv6Next += 1
    for router in listRouters:
        topo["routers"][router]["nextIfname"] = 0

    router_count = 0
    while listRouters != []:
        curRouter = listRouters.pop(0)
        # Physical Interfaces
        if "links" in topo["routers"][curRouter]:
            for destRouterLink, data in sorted(
                    topo["routers"][curRouter]["links"].items()):
                currRouter_lo_json = topo["routers"][curRouter]["links"][
                    destRouterLink]
                # Loopback interfaces
                if "type" in data and data["type"] == "loopback":
                    router_count += 1
                    if ("ipv4" in currRouter_lo_json
                            and currRouter_lo_json["ipv4"] == "auto"):
                        currRouter_lo_json["ipv4"] = "{}{}.{}/{}".format(
                            topo["lo_prefix"]["ipv4"],
                            router_count,
                            number_to_column(curRouter),
                            topo["lo_prefix"]["v4mask"],
                        )
                    if ("ipv6" in currRouter_lo_json
                            and currRouter_lo_json["ipv6"] == "auto"):
                        currRouter_lo_json["ipv6"] = "{}{}:{}/{}".format(
                            topo["lo_prefix"]["ipv6"],
                            router_count,
                            number_to_column(curRouter),
                            topo["lo_prefix"]["v6mask"],
                        )

                if "-" in destRouterLink:
                    # Splitting and storing destRouterLink data in tempList
                    tempList = destRouterLink.split("-")

                    # destRouter
                    destRouter = tempList.pop(0)

                    # Current Router Link
                    tempList.insert(0, curRouter)
                    curRouterLink = "-".join(tempList)
                else:
                    destRouter = destRouterLink
                    curRouterLink = curRouter

                if destRouter in listRouters:
                    currRouter_link_json = topo["routers"][curRouter]["links"][
                        destRouterLink]
                    destRouter_link_json = topo["routers"][destRouter][
                        "links"][curRouterLink]

                    # Assigning name to interfaces
                    currRouter_link_json["interface"] = "{}-{}-eth{}".format(
                        curRouter, destRouter,
                        topo["routers"][curRouter]["nextIfname"])
                    destRouter_link_json["interface"] = "{}-{}-eth{}".format(
                        destRouter, curRouter,
                        topo["routers"][destRouter]["nextIfname"])

                    # add link interface
                    destRouter_link_json[
                        "peer-interface"] = "{}-{}-eth{}".format(
                            curRouter, destRouter,
                            topo["routers"][curRouter]["nextIfname"])
                    currRouter_link_json[
                        "peer-interface"] = "{}-{}-eth{}".format(
                            destRouter, curRouter,
                            topo["routers"][destRouter]["nextIfname"])

                    topo["routers"][curRouter]["nextIfname"] += 1
                    topo["routers"][destRouter]["nextIfname"] += 1

                    # Linking routers to each other as defined in JSON file
                    tgen.gears[curRouter].add_link(
                        tgen.gears[destRouter],
                        topo["routers"][curRouter]["links"][destRouterLink]
                        ["interface"],
                        topo["routers"][destRouter]["links"][curRouterLink]
                        ["interface"],
                    )

                    # IPv4
                    if "ipv4" in currRouter_link_json:
                        if currRouter_link_json["ipv4"] == "auto":
                            currRouter_link_json["ipv4"] = "{}/{}".format(
                                ipv4Next, topo["link_ip_start"]["v4mask"])
                            destRouter_link_json["ipv4"] = "{}/{}".format(
                                ipv4Next + 1, topo["link_ip_start"]["v4mask"])
                            ipv4Next += ipv4Step
                    # IPv6
                    if "ipv6" in currRouter_link_json:
                        if currRouter_link_json["ipv6"] == "auto":
                            currRouter_link_json["ipv6"] = "{}/{}".format(
                                ipv6Next, topo["link_ip_start"]["v6mask"])
                            destRouter_link_json["ipv6"] = "{}/{}".format(
                                ipv6Next + 1, topo["link_ip_start"]["v6mask"])
                            ipv6Next = ipaddress.IPv6Address(
                                int(ipv6Next) + ipv6Step)

            logger.debug(
                "Generated link data for router: %s\n%s",
                curRouter,
                json_dumps(topo["routers"][curRouter]["links"],
                           indent=4,
                           sort_keys=True),
            )

    switch_count = 0
    add_switch_to_topo = []
    while listSwitches != []:
        curSwitch = listSwitches.pop(0)
        # Physical Interfaces
        if "links" in topo["switches"][curSwitch]:
            for destRouterLink, data in sorted(
                    topo["switches"][curSwitch]["links"].items()):

                # Loopback interfaces
                if "dst_node" in data:
                    destRouter = data["dst_node"]

                elif "-" in destRouterLink:
                    # Splitting and storing destRouterLink data in tempList
                    tempList = destRouterLink.split("-")
                    # destRouter
                    destRouter = tempList.pop(0)
                else:
                    destRouter = destRouterLink

                if destRouter in listAllRouters:

                    topo["routers"][destRouter]["links"][curSwitch] = deepcopy(
                        topo["switches"][curSwitch]["links"][destRouterLink])

                    # Assigning name to interfaces
                    topo["routers"][destRouter]["links"][curSwitch][
                        "interface"] = "{}-{}-eth{}".format(
                            destRouter, curSwitch,
                            topo["routers"][destRouter]["nextIfname"])

                    topo["switches"][curSwitch]["links"][destRouter][
                        "interface"] = "{}-{}-eth{}".format(
                            curSwitch, destRouter,
                            topo["routers"][destRouter]["nextIfname"])

                    topo["routers"][destRouter]["nextIfname"] += 1

                    # Add links
                    dictSwitches[curSwitch].add_link(
                        tgen.gears[destRouter],
                        topo["switches"][curSwitch]["links"][destRouter]
                        ["interface"],
                        topo["routers"][destRouter]["links"][curSwitch]
                        ["interface"],
                    )

                    # IPv4
                    if "ipv4" in topo["routers"][destRouter]["links"][
                            curSwitch]:
                        if (topo["routers"][destRouter]["links"][curSwitch]
                            ["ipv4"] == "auto"):
                            topo["routers"][destRouter]["links"][curSwitch][
                                "ipv4"] = "{}/{}".format(
                                    ipv4Next, topo["link_ip_start"]["v4mask"])
                            ipv4Next += 1
                    # IPv6
                    if "ipv6" in topo["routers"][destRouter]["links"][
                            curSwitch]:
                        if (topo["routers"][destRouter]["links"][curSwitch]
                            ["ipv6"] == "auto"):
                            topo["routers"][destRouter]["links"][curSwitch][
                                "ipv6"] = "{}/{}".format(
                                    ipv6Next, topo["link_ip_start"]["v6mask"])
                            ipv6Next = ipaddress.IPv6Address(
                                int(ipv6Next) + ipv6Step)

            logger.debug(
                "Generated link data for router: %s\n%s",
                curRouter,
                json_dumps(topo["routers"][curRouter]["links"],
                           indent=4,
                           sort_keys=True),
            )
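
A minimal sketch of the topology dict this builder consumes, trimmed to the keys referenced above; all router names, prefixes and masks are illustrative:

topo = {
    "link_ip_start": {"ipv4": "10.0.0.0", "v4mask": 30,
                      "ipv6": "fd00::", "v6mask": 64},
    "lo_prefix": {"ipv4": "1.0.", "v4mask": 32,
                  "ipv6": "2001:db8:f::", "v6mask": 128},
    "ipv4base": "10.0.0.0",
    "ipv6base": "fd00::",
    "routers": {
        "r1": {"links": {"lo": {"type": "loopback", "ipv4": "auto"},
                         "r2": {"ipv4": "auto", "ipv6": "auto"}}},
        "r2": {"links": {"r1": {"ipv4": "auto", "ipv6": "auto"}}},
    },
}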
Example #41
File: loaders.py Project: narimantos/ttp
def load_files(path, extensions=[], filters=[], read=False):
    """
    Method to load files from path, and filter file names with
    REs filters and extensions.
    Args:
        path (str): string that contains OS path
        extensions (list): list of file extensions, e.g. ['txt', 'log', 'conf']
        filters (list): list of regex strings to filter file names
        read (bool): if False, return file names; if True, read the files
            and return their contents
    Returns:
        List of ("text_data", text) tuples (or empty list []) if read is
        True; list of ("file_name", path) tuples (or []) if read is False
    """
    files = []
    # need to use path[:5000] because if path is actually the text of the template
    # and is longer than X symbols, os.path will choke with a "path too long"
    # error; hence the safe assumption that no OS path is longer than 5000 symbols

    # check if structured, non text, data given, return it as is if so
    # to process within input macro/function
    if not isinstance(path, str):
        return [(
            "structured_data",
            path,
        )]
    elif _ttp_["python_major_version"] == 2:
        if not isinstance(
                path,
            (
                unicode,
                str,
            ),
        ):
            return [(
                "structured_data",
                path,
            )]

    # check if path is a reference to template in ttp_templates collection
    if path.startswith("ttp://"):
        from ttp_templates import get_template
        return [("text_data", get_template(path=path.replace("ttp://", "")))]
    # check if path is a path to file:
    elif os.path.isfile(path[:5000]):
        if read:
            try:
                if _ttp_["python_major_version"] == 2:
                    with open(path, "r") as file_obj:
                        return [(
                            "text_data",
                            file_obj.read(),
                        )]
                with open(path, "r", encoding="utf-8") as file_obj:
                    return [(
                        "text_data",
                        file_obj.read(),
                    )]
            except UnicodeDecodeError:
                log.warning(
                    'ttp_utils.load_files: Unicode read error, file "{}"'.
                    format(path))
        else:
            return [(
                "file_name",
                path,
            )]
    # check if path is a directory:
    elif os.path.isdir(path[0:5000]):
        from re import search as re_search

        files = [f for f in os.listdir(path) if os.path.isfile(path + f)]
        if extensions:
            files = [f for f in files if f.split(".")[-1] in extensions]
        for filter in filters:
            files = [f for f in files if re_search(filter, f)]
        if read:
            ret = []
            for f in files:
                if _ttp_["python_major_version"] == 2:
                    with open((path + f), "r") as file_obj:
                        ret.append((
                            "text_data",
                            file_obj.read(),
                        ))
                elif _ttp_["python_major_version"] == 3:
                    with open((path + f), "r", encoding="utf-8") as file_obj:
                        ret.append((
                            "text_data",
                            file_obj.read(),
                        ))
            return ret
        else:
            return [(
                "file_name",
                path + f,
            ) for f in files]
    # check if path is a string:
    elif isinstance(path, str):
        return [(
            "text_data",
            path,
        )]
    # check if py2, if so check if path is unicode string:
    elif _ttp_["python_major_version"] == 2:
        if isinstance(path, unicode):
            return [(
                "text_data",
                path,
            )]
    else:
        return []
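
Usage sketch (paths and names are illustrative; note the directory branch concatenates path + filename directly, so the trailing slash matters):

# return matching file names only
load_files("./logs/", extensions=["log"], filters=[r"^switch"], read=False)
# -> [("file_name", "./logs/switch1.log"), ...]

# read file contents instead
load_files("./logs/", extensions=["log"], read=True)
# -> [("text_data", "<file contents>"), ...]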
Example #42
    def version_string(string: str):
        search = re_search(r"v[0-9]+(\.[0-9]+){0,2}", string)

        return search.group(0) if search else ""
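
Illustrative inputs and outputs for this helper:

version_string("release v1.2.3 (stable)")  # -> "v1.2.3"
version_string("v10")                      # -> "v10"
version_string("no version here")          # -> ""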
Example #43
    def params_cross_validation(self, cv_path='tie_strengths/cv_config.yaml'):
        try:
            conf = yaml.safe_load(open(cv_path))
        except IOError:
            self.paths['cv_path'] = os.path.join(self.run_path,
                                                 'cv_config.yaml')
            conf = yaml.safe_load(open(self.paths['cv_path']))
        params = self.get_variable_transformations(conf['params'])
        cols_pttrns = list(params.keys())  # materialized: concatenated with a list below
        try:  # TODO: change this (for db)
            self.paths['full_df']
        except KeyError:
            self.paths['full_df'] = os.path.join(self.run_path, 'full_df.txt')

        df = pd.read_table(self.paths['full_df'], sep=' ')
        print('Table Read \n')
        cols_dic = self.get_cols_dic(cols_pttrns,
                                     df.columns)  # link cols with patterns

        # TODO: add this to a diff function, it's different preprocessing
        pttrn = r'_wk(n|l)_(\d+|t|l)'
        df_nas = {col: 0. for col in df.columns if re_search(pttrn, col)}
        df = df.fillna(value=df_nas)
        print('NAs filled\n')
        wkn_cols = [
            n for n, col in enumerate(df.columns)
            if re_search(r'c_wkn_\d+', col)
        ]
        wkl_cols = [
            n for n, col in enumerate(df.columns)
            if re_search(r'c_wkl_\d+', col)
        ]
        wks_cols = [
            n for n, col in enumerate(df.columns)
            if re_search(r's_wkn_\d+', col)
        ]

        # TODO: check if its faster to apply diff function
        df.loc[:, 'prop_len'] = get_prop_len(df['c_wkl_l'], df['deg_0'],
                                             df['deg_1'], df['n_len_0'],
                                             df['n_len_1'])

        #df.loc[:, 'c_l_dist'] = df.apply(lambda x: np.dot(x[wkn_cols], x[wkl_cols]), axis=1)
        print('First Variable\n')
        del df['c_wkn_0']
        del df['c_wkl_0']
        #del df['s_wkn_0']
        del df['1']
        del df['n_ij']
        del df['deg_0']
        del df['deg_1']
        # These columns are not always present, so drop them conditionally
        # instead of silencing KeyError with bare excepts.
        for col in ('0', '0_1', '1_1', '0_0'):
            if col in df.columns:
                del df[col]

        self.paths['cv_stats'] = os.path.join(self.run_path,
                                              conf['output_file'])
        w = open(self.paths['cv_stats'], 'w')  # text mode: we write str, not bytes
        w.write(' '.join(cols_pttrns +
                         ['sms', 'n_row', 'score', 'model', 'n']) + '\n')
        print("Obtaining models\n")
        w.close()
        for comb in product(*params.values()):
            transf, nas = self.parse_variable_combinations(
                cols_pttrns, cols_dic, comb)
            proc_df = self.df_preprocessing(transf, nas, df)
            y = proc_df['ovrl']
            del proc_df['ovrl']
            x_train, x_test, y_train, y_test = train_test_split(proc_df,
                                                                y,
                                                                test_size=0.3)
            rf = RandomForestRegressor()
            rf.fit(x_train, y_train)
            sc = rf.score(x_test, y_test)
            self.write_results(w, comb, 1, proc_df.shape[0], sc, 'RF')

            #svm = SVR()
            #svm.fit(x_train, y_train)
            #sc = svm.score(x_test, y_test)
            #self.write_results(w, comb, 1, proc_df.shape[0], sc, 'RF')

            #print('2\n')
            transf = self.remove_sms_cols(transf)
            proc_df = self.df_preprocessing(transf, nas, df, drop_sms=True)
            y = proc_df['ovrl']
            del proc_df['ovrl']
            x_train, x_test, y_train, y_test = train_test_split(proc_df,
                                                                y,
                                                                test_size=0.5)
            rf = RandomForestRegressor()
            rf.fit(x_train, y_train)
            sc = rf.score(x_test, y_test)
            self.write_results(w, comb, 0, proc_df.shape[0], sc, 'RF')

            print('2\n')
Example #44
def wUnloadByXlsxCol(arg=None, cfg=None, sql_root=None, res_root=None):
    logger.info("Unload_By_Xlsx_Col (using UNLOAD_ACTIONS_LIST)")
    try:
        xls_name_list = list(sql_root.glob('**/*.xlsx'))
        if (len(xls_name_list) == 0):
            logger.error("{0}: {1} {2}".format(
                datetime.datetime.now().strftime('%d %b %Y %H:%M:%S'),
                "No XLSX in", sql_root))
            return False
        else:
            if not res_root.exists():
                res_root.mkdir(parents=True)
            selected_action = cfg['UNLOAD_ACTIONS_LIST'][str(arg.section)]
            sql_content = CheckSQLSafety(open(selected_action, 'r').read())
            if sql_content:
                dd = None
                if arg.db is None:
                    dd = cfg[cfg['UNLOAD_ACTIONS_LIST']['db_alias']]
                    logger.info('{0} / {1} / DB: {2}'.format(
                        str(arg.section), 'SQL - OK',
                        cfg['UNLOAD_ACTIONS_LIST']['db_alias']))
                else:
                    dd = cfg[arg.db]
                    logger.info('{0} / {1} / DB: {2}'.format(
                        str(arg.section), 'SQL - OK', str(arg.db)))
                cnn = ConnDb('ora', dd)
                if cnn:
                    curs = cnn.cursor()
                    sr = Path(
                        str(cfg['UNLOAD_ACTIONS_LIST']['saved_filenames']))
                    if not sr.is_dir() and not sr.parent.exists():
                        sr.parent.mkdir(parents=True)
                    if arg.save_filenames is True and not sr.is_dir():
                        ftemp = sr.open('w')
                    else:
                        ftemp = tempfile.TemporaryFile('w')
                    try:
                        for i in xls_name_list:
                            try:
                                wb = openpyxl.Workbook(optimized_write=True,
                                                       guess_types=False)
                                ws = wb.create_sheet()
                                file_in = str(i)
                                file_out_prefix = None
                                if arg.use_src_subfolders:
                                    file_out_prefix = res_root.joinpath(
                                        *i.parts[1:]).parent
                                else:
                                    file_out_prefix = res_root
                                if not file_out_prefix.exists():
                                    file_out_prefix.mkdir(parents=True)
                                file_out = file_out_prefix / ''.join(
                                    [Path(i.name).stem, '_END.xlsx'])
                                load_book = openpyxl.load_workbook(
                                    file_in,
                                    read_only=True,
                                    use_iterators=True)
                                load_sheet = load_book.active
                                agg = []
                                r_count = 0
                                logger.info(' -> file : {0}'.format(i.name))
                                logger.info('  > start: {0}'.format(
                                    datetime.datetime.now().strftime(
                                        '%d %b %Y %H:%M:%S')))
                                try:
                                    for r in load_sheet.rows:
                                        rs = []
                                        agg = [
                                            string_none(cc.value) for cc in r
                                        ]
                                        patt = '^.*$'
                                        if agg[int(cfg['UNLOAD_ACTIONS_LIST']
                                                   ['kolonka'])] is not None:
                                            z = str(agg[int(
                                                cfg['UNLOAD_ACTIONS_LIST']
                                                ['kolonka'])]).strip()
                                            if not re_search(patt, z):
                                                ws.append(agg)
                                                r_count += 1
                                            else:
                                                curs.execute(sql_content, S1=z)
                                                is_not_empty = 0
                                                abc = curs.fetchall()
                                                if len(abc) != 0:
                                                    is_not_empty = 1
                                                    for ii in abc:
                                                        a = list(ii)
                                                        rs = agg + [
                                                            string_none(x)
                                                            for x in a
                                                        ]
                                                        ws.append(rs)
                                                if is_not_empty == 0:
                                                    ws.append(agg)
                                                r_count += 1
                                                time.sleep(0.2)
                                            logger.info('    {0}{1}'.format(
                                                str(r_count).ljust(7), z))
                                        else:
                                            ws.append(agg)
                                            r_count += 1
                                    wb.save(str(file_out))
                                    ftemp.write(str(file_out) + '\n')
                                    logger.info('  > stop : {0}\n'.format(
                                        datetime.datetime.now().strftime(
                                            '%d %b %Y %H:%M:%S')))
                                except Exception as eee:
                                    logger.error("{0}: {1}\n{2}\n{3}\n".format(
                                        datetime.datetime.now().strftime(
                                            '%d %b %Y %H:%M:%S'), i,
                                        agg[int(cfg['UNLOAD_ACTIONS_LIST']
                                                ['kolonka'])], eee))
                                    continue
                            except Exception as eee:
                                logger.error("{0}: {1}\n{2}\n".format(
                                    datetime.datetime.now().strftime(
                                        '%d %b %Y %H:%M:%S'), i, eee))
                                continue
                    finally:
                        logger.info('> close connection')
                        cnn.close()
                        ftemp.close()
                else:
                    return False
            else:
                return False
    except Exception as zz:
        logger.error("{0} - ERROR: {1}\n".format(
            datetime.datetime.now().strftime('%d %b %Y %H:%M:%S'), zz))
        return False
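
A sketch of the config shape this routine reads, inferred from the lookups above; every key and value here is illustrative:

cfg = {
    "UNLOAD_ACTIONS_LIST": {
        "1": "sql/lookup_by_col.sql",   # str(arg.section) -> SQL file to run
        "db_alias": "ORA_PROD",         # default connection block when arg.db is None
        "saved_filenames": "out/produced_files.txt",
        "kolonka": "2",                 # 0-based column index of the lookup key
    },
    "ORA_PROD": {"user": "scott", "password": "tiger", "dsn": "db:1521/orcl"},
}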
Example #45
    def __init__(self, docstring):
        self.args = {}
        self.match = False
        self.options = []
        self.program = None
        self.closest_matches = []
        self.docstring = docstring

        # Parse the 'Usage' section of the doc string.  Create a
        # CommandSequence object for every line in Usage, store those
        # objects on the self.commands list.
        state = None
        self.commands = []
        found_help_option = False

        for line in docstring.split('\n'):
            if line.startswith('Usage'):
                state = 'Usage'
                continue

            elif line.startswith('Help'):
                break

            elif line.startswith('Options'):
                break

            elif line == '':
                continue

            if state == 'Usage':
                if '--help' in line:
                    found_help_option = True
                self.commands.append(CommandSequence(line))

                if self.program is None:
                    result = re_search(r'^\s+(\S+)', line)
                    if result:
                        self.program = result.group(1)

        # Automagically add a (-h|--help) option
        if not found_help_option:
            line = '%s (-h|--help)' % self.program
            self.commands.append(CommandSequence(line))

        if logging.getLogger().getEffectiveLevel() == logging.DEBUG:
            for cmd in self.commands:
                logging.debug('CMD TEXT: %s' % cmd.text)
                logging.debug('CMD TOKENS')
                logging.debug(cmd)
                logging.debug('')

        # Now loop over all of the CommandSequence objects and build a list
        # of every kind of token in the doc string
        all_tokens = []
        for cmd in self.commands:
            for token in cmd.tokens:
                all_tokens += token.words
        all_tokens = set(all_tokens)

        # Command sequence all_tokens should only cover options found in that docstring
        # entry. Not the all docstring tokens.
        for cmd in self.commands:
            cmd.all_tokens = []
            for token in cmd.tokens:
                cmd.all_tokens += token.words

        # The 1st item in argv is the program name...ignore it
        self.argv = sys.argv[1:]

        # Init all tokens in args to None
        for x in all_tokens:
            self.args[x] = None

        candidates = []
        for cmd in self.commands:
            if cmd.argv_matches_tokens(self.argv):
                candidates.append(cmd)

        # This is a bad thing
        if not candidates:

            logging.debug("There are no candidates")

            high_score = -1
            scores = {}
            options_by_score = {}

            for cmd in self.commands:
                score = cmd.score

                if score > high_score:
                    high_score = score

                if score not in scores:
                    scores[score] = []

                if score not in options_by_score:
                    options_by_score[score] = []

                # Set the option choices to return for bash-completion
                if cmd.option:

                    # If the user entered the exact keyword then we should return the
                    # options following that keyword.
                    if not cmd.last_matching_token or cmd.last_matching_token.exact_match:
                        options_by_score[score] += cmd.option

                    # If they only entered part of the keyword ('sh' for 'show' for example)
                    # then we should return 'show' so bash can tab complete it.
                    else:
                        if cmd.last_matching_token.key_text:
                            options_by_score[score].append(
                                cmd.last_matching_token.key_text)

                scores[score].append(cmd)

            if high_score in scores and scores[high_score]:
                self.closest_matches.append('')
                self.closest_matches.append('Closest Matches:')

                for cmd in scores[high_score]:
                    self.closest_matches.append('    %s %s' %
                                                (self.program, cmd.text))
                self.closest_matches.append('')

            if high_score in options_by_score and options_by_score[high_score]:
                self.options = sorted(set(options_by_score[high_score]))

        # This is a good thing
        elif len(candidates) == 1:
            cmd = candidates[0]

            logging.debug("There is one candidate:\n%s" % cmd)

            for token in cmd.tokens:

                # The key_text is only set if the token matched
                if token.key_text:
                    self.args[token.key_text] = token.value
                    logging.debug("args key: %s, value: %s" %
                                  (token.key_text, token.value))

            self.match = True
            self.options = []

            # Set the option choices to return for bash-completion
            if cmd.option:

                # If the user entered the exact keyword then we should return the
                # options following that keyword.
                if not cmd.last_matching_token or cmd.last_matching_token.exact_match:
                    self.options = cmd.option

                # If they only entered part of the keyword ('sh' for 'show' for example)
                # then we should return 'show' so bash can tab complete it.
                else:
                    if cmd.last_matching_token.key_text:
                        self.options = [cmd.last_matching_token.key_text]

            # If the user entered -h or --help print the docstring and exit
            if len(cmd.tokens) == 1:
                token = cmd.tokens[0]
                if token.key_text == '-h' or token.key_text == '--help':
                    print(self.docstring)
                    exit(0)

        else:
            # print("\nERROR: ambiguous parse chain...matches:")
            _str = 'Ambiguous Match. Options:'
            _possible_matches = []
            for cmd in candidates:
                for _token in cmd.tokens:
                    if _token.key_text:
                        _possible_matches.append(_token.key_text)
            print(_str + ' ' + ', '.join(_possible_matches))
            exit(8)
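
A sketch of the kind of docstring this parser walks; the program name, commands and layout are assumptions for illustration:

docstring = """
Usage:
    nettool show interfaces [--json]
    nettool clear counters

Options:
    --json    print machine-readable output
"""
# Parsing would then be driven by the class above, e.g.:
# cli = DocoptLikeParser(docstring)   # hypothetical class name for this __init__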
Example #46
 async def now_playing(self, ctx):
     """Displays some information about the current song"""
     player = self.bot.lava.get_player(ctx.guild.id)
     current_song = await self.bot.redis.execute(
         "LINDEX", f"{self.music_prefix}que:{ctx.guild.id}", 0)
     if not current_song:
         return await ctx.send(
             ":warning:`I'm not playing anything at the moment...`",
             delete_after=5)
     current_song = loads(current_song)
     if not (ctx.guild and ctx.author.color == discord.Color.default()):
         embed_color = ctx.author.color
     else:
         embed_color = self.bot.config.primary_colour
     player = self.bot.lava.get_player(ctx.guild.id)
     playing_embed = discord.Embed(title="Now playing...",
                                   colour=embed_color)
     if current_song.get("info"):
         if current_song["info"].get("title"):
             playing_embed.add_field(
                 name="Title",
                 value=f'```{current_song["info"]["title"]}```',
                 inline=False,
             )
         if current_song["info"].get("author"):
             playing_embed.add_field(name="Uploader",
                                     value=current_song["info"]["author"])
         if current_song["info"].get(
                 "length") and not current_song["info"].get("isStream"):
             try:
                 playing_embed.add_field(
                     name="Length",
                     value=timedelta(
                         milliseconds=current_song["info"]["length"]),
                 )
                 playing_embed.add_field(
                     name="Remaining",
                     value=str(
                         timedelta(
                             milliseconds=current_song["info"]["length"]) -
                         timedelta(seconds=player.position)).split(".")[0],
                 )
                 playing_embed.add_field(
                     name="Position",
                     value=str(
                         timedelta(seconds=player.position)).split(".")[0],
                 )
             except OverflowError:  # we cannot do anything if C cannot handle it
                 pass
         elif current_song["info"].get("isStream"):
             playing_embed.add_field(name="Length", value="Live")
         else:
             playing_embed.add_field(name="Length", value="N/A")
         if current_song["info"].get("uri"):
             playing_embed.add_field(
                 name="Link to the original",
                 value=f"**[Click me!]({current_song['info']['uri']})**",
             )
             if bool(
                     re_search(
                         r"^(?:(?:https?:)?\/\/)?(?:(?:www|m)\.)?(?:(?:youtube\.com|youtu.be))(?:\/(?:[\w\-]+\?v=|embed\/|v\/)?)(?:[\w\-]+)(\S+)?$",
                         current_song["info"]["uri"],
                     )) and current_song["info"].get(
                         "identifier"):  # YT URL check (stray trailing "|" removed: it made the pattern match anything)
                 playing_embed.set_thumbnail(
                     url=
                     f"https://img.youtube.com/vi/{current_song['info']['identifier']}/default.jpg"
                 )
         playing_embed.add_field(name="Volume", value=f"{player.volume} %")
         if player.paused:
             playing_embed.add_field(name="Playing status",
                                     value="`РЈИPaused`")
         if {"title", "length"
             } <= set(current_song["info"]
                      ) and not current_song["info"]["isStream"]:
             button_position = int(
                 100 * (player.position /
                        (current_song["info"]["length"] / 1000)) / 2.5)
             controller = (
                 f"```ɴᴏᴡ ᴘʟᴀʏɪɴɢ: {current_song['info']['title']}\n"
                 f"{(button_position - 1) * '─'}⚪{(40 - button_position) * '─'}\n ◄◄⠀{'▐▐' if not player.paused else '▶'} ⠀►►⠀⠀　　⠀ "
                 f"{str(timedelta(seconds=player.position)).split('.')[0]} / {timedelta(seconds=int(current_song['info']['length'] / 1000))}\n{11*' '}───○ 🔊⠀　　　ᴴᴰ ⚙ ❐ ⊏⊐```"
             )
             playing_embed.description = controller
     else:
         playing_embed.description = "```No information```"
     if current_song.get("requester_id") and ctx.guild.get_member(
             current_song["requester_id"]
     ):  # check to avoid errors on guild leave
         req_member = ctx.guild.get_member(current_song["requester_id"])
         playing_embed.set_footer(
             text=f"Song requested by: {req_member.display_name}",
             icon_url=req_member.avatar_url_as(format="png", size=64),
         )
     await ctx.send(embed=playing_embed)
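
A note on the YouTube check above: the scraped pattern originally ended in a stray "|", which made it match every URI; it is removed above. A minimal sanity check of the fixed pattern (the sample URLs are made up):

from re import search as re_search

YT_RE = (r"^(?:(?:https?:)?\/\/)?(?:(?:www|m)\.)?(?:(?:youtube\.com|youtu.be))"
         r"(?:\/(?:[\w\-]+\?v=|embed\/|v\/)?)(?:[\w\-]+)(\S+)?$")

print(bool(re_search(YT_RE, "https://www.youtube.com/watch?v=dQw4w9WgXcQ")))  # True
print(bool(re_search(YT_RE, "https://example.com/song.mp3")))                 # False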
Example #47
0
    "dev": ["black==20.8b1", "pylint==2.6.0", "tox==3.20.1"],
    "testing": [
        "mypy==0.790",
        "pytest==6.1.1",
        "pytest-cov==2.10.1",
        "pytest-xdist==2.1.0",
        "vcrpy==4.1.1",
    ],
}

with open(join(ROOT_DIR, "README.md"), encoding="utf-8") as readme_file:
    README = readme_file.read()

with open(join(ROOT_DIR, PACKAGE_DIR, "__version__.py"),
          encoding="utf-8") as version_file:
    VERSION = re_search(r'__version__\s+?=\s+?"(.+)"',
                        version_file.read()).group(1)

setup(
    name=PACKAGE_NAME,
    version=VERSION,
    description="Singer.io tap for extracting data",
    long_description=README,
    long_description_content_type="text/markdown",
    author="Stitch",
    url="http://singer.io",
    classifiers=[
        "Programming Language :: Python :: 3 :: Only",
        "License :: OSI Approved :: GNU Affero General Public License v3",
        "Operating System :: OS Independent",
    ],
    py_modules=["ordway_tap"],
Example #48
0
def constellation_stick_figures(filename, obstime, obspos, altitude_cutoff):
    """Obtain local coordinates for constellation stick figures."""
    const_name = []
    const_location = []
    for line in open(filename):
        if (line[0] == "#") or (len(line) < 10):
            continue
        try:
            cname, ra1, dec1, ra2, dec2 = re_search(
                r"^\s*(\w+)\s+([-]?\d+\.\d+)\s+([-]?\d+\.\d+)\s+"
                r"([-]?\d+\.\d+)\s+([-]?\d+\.\d+)\s*$", line).groups()

            const_name.append(cname)
            const_location.append(
                (float(ra1), float(dec1), float(ra2), float(dec2)))
        except (AttributeError, ValueError):  # unmatched line or unparsable float
            continue
    # Save extracted csvs
    with open(abspath("./Processed-Data/constellations-raw-data.csv"),
              "w") as csvfile:
        for i, cname in enumerate(const_name):
            csvfile.write("%s,%17.12f,%17.12f,%17.12f,%17.12f\n" %
                          (cname, const_location[i][0], const_location[i][1],
                           const_location[i][2], const_location[i][3]))
    # Transform coordinates to azimuth and altitude
    c_start_icrs = SkyCoord(ra=[const[0]
                                for const in const_location] * u.degree,
                            dec=[const[1]
                                 for const in const_location] * u.degree,
                            frame='icrs')
    c_start_azalt = c_start_icrs.transform_to(
        AltAz(obstime=obstime, location=obspos))
    c_stop_icrs = SkyCoord(ra=[const[2]
                               for const in const_location] * u.degree,
                           dec=[const[3]
                                for const in const_location] * u.degree,
                           frame='icrs')
    c_stop_azalt = c_stop_icrs.transform_to(
        AltAz(obstime=obstime, location=obspos))
    # Extract transformed coordinates
    az1 = list(c_start_azalt.az.deg)
    alt1 = list(c_start_azalt.alt.deg)
    az2 = list(c_stop_azalt.az.deg)
    alt2 = list(c_stop_azalt.alt.deg)
    stick_figures_all = defaultdict(list)
    # Save transformed coordinates as a defaultdict
    for i, cname in enumerate(const_name):
        stick_figures_all[cname].append((az1[i], alt1[i], az2[i], alt2[i]))
    # Backup data as a csv
    with open(abspath("./Processed-Data/constellations-all.csv"),
              "w") as csvfile:
        for cname in sorted(stick_figures_all.keys()):
            for a1, e1, a2, e2 in stick_figures_all[cname]:
                csvfile.write("%s,%17.12f,%17.12f,%17.12f,%17.12f\n" %
                              (cname, a1, e1, a2, e2))
    # Obtain only the visible hemisphere
    stick_figures = {}
    for cname in sorted(stick_figures_all.keys()):
        save_flag = True
        for a1, e1, a2, e2 in stick_figures_all[cname]:
            if (e1 >= altitude_cutoff) and (e2 >= altitude_cutoff):
                save_flag = (True and save_flag)
            else:
                save_flag = False
                break
        # Check save flag and append to dictionary
        if save_flag:
            stick_figures[cname] = stick_figures_all[cname]
    # Save filtered coordinates as csv
    with open(abspath("./Processed-Data/constellations-visible.csv"),
              "w") as csvfile:
        for cname in sorted(stick_figures.keys()):
            for a1, e1, a2, e2 in stick_figures[cname]:
                csvfile.write("%s,%17.12f,%17.12f,%17.12f,%17.12f\n" %
                              (cname, a1, e1, a2, e2))
    # Return stick_figures data
    return stick_figures
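
The parser above expects each stick-figure line to hold a name and four floats; the regex both validates and splits the line. A minimal sketch of that match (the input line is made up):

from re import search as re_search

line = "  Ori   5.533  -0.299   5.679  -1.943"
m = re_search(r"^\s*(\w+)\s+([-]?\d+\.\d+)\s+([-]?\d+\.\d+)\s+"
              r"([-]?\d+\.\d+)\s+([-]?\d+\.\d+)\s*$", line)
print(m.groups())  # ('Ori', '5.533', '-0.299', '5.679', '-1.943')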
Example #49
0
def list_page(request, boardname):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})

  page = int(request.GET.get('page', 1))
  bulletinPerPage = getattr(settings, 'BULLETIN_PER_PAGE', 15)  # number of posts shown per page
  board = get_board(boardname)

  if board is None:
    return render_to_response('noExist.html',{'user':request.user, 'target':'게시판이'})

  try:
    if not board.group_board.members.filter(id=request.user.id).exists():
      return render_to_response('noExist.html',{'user':request.user, 'target':'해당 소모임게시판에 접근할 권한이'})
  except ObjectDoesNotExist:
    pass

  if boardname == 'likebook':
    supportWrite = False    # whether writing is supported
    like_list = Like.objects.filter(Q(user=request.user) & Q(bulletin__deleted=False))
    total_bulletin = like_list.count()
    like_list = like_list[bulletinPerPage * (page - 1):bulletinPerPage * page]
    bulletinList = [like.bulletin for like in like_list]
    bulletinCount = len(bulletinList)
  elif boardname == 'my':  # posts I wrote
    supportWrite = False
    bulletins = Bulletin.bulletIns.filter(Q(writer=request.user) &
                                          Q(deleted=False))  # full queryset
    total_bulletin = bulletins.count()  # total count
    bulletinList = bulletins[bulletinPerPage*(page-1):bulletinPerPage*page]
    bulletinCount = bulletinList.count()
  elif boardname == 'all':  # every post
    supportWrite = False
    key = request.GET.get('key', None)
    bulletins = Bulletin.bulletIns.filter(
      Q(deleted=False) &
      (Q(board__secret=False)
       |Q(board__group_board__members=request.user)  # would like to pull in group boards as well...
       |Q(board__name=request.user.get_profile().get_pure_sid())  # student-ID boards have to be included...
      )
    )  # full queryset
    if key:
      bulletins = bulletins.filter(
        Q(isHiddenUser=False) &
        (Q(title__icontains=key) |
         Q(writer__first_name__icontains=key)
        )
      )
    total_bulletin = bulletins.count()  # total count
    bulletinList = bulletins[bulletinPerPage*(page-1):bulletinPerPage*page]
    bulletinCount = bulletinList.count()
  else:
    supportWrite = True    # writing is supported
    # fetch the list here
    bulletins = Bulletin.bulletIns.filter(Q(board=board) &
                                          Q(notice=False) &
                                          Q(deleted=False))  # full queryset
    total_bulletin = bulletins.count()  # total count
    bulletins = bulletins[bulletinPerPage*(page-1):bulletinPerPage*page]

    bulletinCount = bulletins.count()
    if page == 1:
      noticeList = Bulletin.notices.filter(Q(board=board) & Q(deleted=False))
      bulletinList = QuerySetChain(noticeList, bulletins)
    else:
      bulletinList = bulletins

  form = BulletinSearchForm(boardname=boardname, data=request.GET)
  if page != 1 and len(bulletinList) == 0:
    return HttpResponseRedirect('/board/%s/' % boardname)    # go back to the default page


  total_page = total_bulletin // bulletinPerPage    # total pages
  if total_bulletin % bulletinPerPage:  # if there is a remainder
    total_page += 1             # add one more page
  no_seq = total_bulletin - (page-1) * bulletinPerPage - bulletinCount    # numbering sequence!
  page_before = page-5         # five pages back
  page_after = page+5         # five pages forward
  sPage = max(1, page-4)          # first page shown
  ePage = min(page+4, total_page) + 1   # last page shown
  page_list = range(sPage, ePage)      # page list

  if boardname in specialTemplate:
    template_name = 'board/list_content/%s_list.html' % boardname
  else:
    template_name = 'board/list_content/base_list.html'
  return direct_to_template(request, template_name, {
    'form':form,        # search form; for testing
    'supportWrite':supportWrite,  # whether writing is supported
    'board':board,        # the board
    'page':page,        # current page
    'no_seq':no_seq,      # index sequence start number for this page
    'total_page':total_page,  # total pages
    'page_before':page_before,  # 5 pages back
    'page_after':page_after,  # 5 pages forward
    'page_list':page_list,    # page list
    'bulletinList':bulletinList,    # bulletin list
  })
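
The page-count arithmetic above (integer-divide, then add one when there is a remainder) is ceiling division; in Python it can be written in one step. A minimal sketch, not part of the original view:

total_bulletin, bulletinPerPage = 47, 15
total_page = -(-total_bulletin // bulletinPerPage)  # ceiling without math.ceil
print(total_page)  # 4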
Example #50
0
def cmd_depot_download(args):
    pbar = fake_tqdm()
    pbar2 = fake_tqdm()

    try:
        with init_clients(args) as (_, _, manifests):
            fileindex = ManifestFileIndex(manifests)

            # pre-index vpk file to speed up lookups
            if args.vpk:
                fileindex.index('*.vpk')

            # calculate total size
            total_files = 0
            total_size = 0

            LOG.info("Locating and counting files...")

            for manifest in manifests:
                for depotfile in manifest:
                    if not depotfile.is_file:
                        continue

                    filepath = depotfile.filename_raw

                    # list files inside vpk
                    if args.vpk and filepath.endswith('.vpk'):
                        # fast skip VPKs that can't possibly match
                        if args.name and ':' in args.name:
                            pre = args.name.split(':', 1)[0]
                            if not fnmatch(filepath, pre):
                                continue
                        if args.regex and ':' in args.regex:
                            pre = args.regex.split(':', 1)[0]
                            if not re_search(pre + '$', filepath):
                                continue

                        # scan VPKs, but skip data only ones
                        if filepath.endswith('_dir.vpk') or not re_search(
                                r"_\d+\.vpk$", filepath):
                            LOG.debug("Scanning VPK file: %s", filepath)

                            try:
                                fvpk = fileindex.get_vpk(filepath)
                            except ValueError as exp:
                                LOG.error("VPK read error: %s", str(exp))
                            else:
                                for vpkfile_path, (
                                        _, _, _, _, _,
                                        size) in fvpk.c_iter_index():
                                    complete_path = "{}:{}".format(
                                        filepath, vpkfile_path)

                                    if args.name and not fnmatch(
                                            complete_path, args.name):
                                        continue
                                    if args.regex and not re_search(
                                            args.regex, complete_path):
                                        continue

                                    total_files += 1
                                    total_size += size

                    # account for depot files
                    if args.name and not fnmatch(filepath, args.name):
                        continue
                    if args.regex and not re_search(args.regex, filepath):
                        continue

                    total_files += 1
                    total_size += depotfile.size

            if not total_files:
                raise SteamError("No files found to download")

            # enable progress bar
            if not args.no_progress and sys.stderr.isatty():
                pbar = tqdm(desc='Data ',
                            mininterval=0.5,
                            maxinterval=1,
                            total=total_size,
                            unit='B',
                            unit_scale=True)
                pbar2 = tqdm(desc='Files',
                             mininterval=0.5,
                             maxinterval=1,
                             total=total_files,
                             position=1,
                             unit=' file',
                             unit_scale=False)
                gevent.spawn(pbar.gevent_refresh_loop)
                gevent.spawn(pbar2.gevent_refresh_loop)

            # download files
            tasks = GPool(6)

            for manifest in manifests:
                if pbar2.n == total_files:
                    break

                LOG.info("Processing manifest (%s) '%s' ..." %
                         (manifest.gid, manifest.name or "<Unknown>"))

                for depotfile in manifest:
                    if pbar2.n == total_files:
                        break

                    if not depotfile.is_file:
                        continue

                    filepath = depotfile.filename_raw

                    if args.vpk and filepath.endswith('.vpk'):
                        # fast skip VPKs that can't possibly match
                        if args.name and ':' in args.name:
                            pre = args.name.split(':', 1)[0]
                            if not fnmatch(filepath, pre):
                                continue
                        if args.regex and ':' in args.regex:
                            pre = args.regex.split(':', 1)[0]
                            if not re_search(pre + '$', filepath):
                                continue

                        # scan VPKs, but skip data only ones
                        if filepath.endswith('_dir.vpk') or not re_search(
                                r"_\d+\.vpk$", filepath):
                            LOG.debug("Scanning VPK file: %s", filepath)

                            try:
                                fvpk = fileindex.get_vpk(filepath)
                            except ValueError as exp:
                                LOG.error("VPK read error: %s", str(exp))
                            else:
                                for vpkfile_path, metadata in fvpk.c_iter_index(
                                ):
                                    complete_path = "{}:{}".format(
                                        filepath, vpkfile_path)

                                    if args.name and not fnmatch(
                                            complete_path, args.name):
                                        continue
                                    if args.regex and not re_search(
                                            args.regex, complete_path):
                                        continue

                                    tasks.spawn(
                                        vpkfile_download_to,
                                        depotfile.filename,
                                        fvpk.get_vpkfile_instance(
                                            vpkfile_path,
                                            fvpk._make_meta_dict(metadata)),
                                        args.output,
                                        no_make_dirs=args.no_directories,
                                        pbar=pbar,
                                    )

                                    pbar2.update(1)

                                    # break out of vpk file loop
                                    if pbar2.n == total_files:
                                        break

                    # break out of depotfile loop
                    if pbar2.n == total_files:
                        break

                    # filepath filtering
                    if args.name and not fnmatch(filepath, args.name):
                        continue
                    if args.regex and not re_search(args.regex, filepath):
                        continue

                    tasks.spawn(
                        depotfile.download_to,
                        args.output,
                        no_make_dirs=args.no_directories,
                        pbar=pbar,
                        verify=(not args.skip_verify),
                    )

                    pbar2.update(1)

            # wait on all downloads to finish
            tasks.join()
            gevent.sleep(0.5)
    except KeyboardInterrupt:
        pbar.close()
        LOG.info("Download canceled")
        return 1  # error
    except SteamError as exp:
        pbar.close()
        pbar.write(str(exp))
        return 1  # error
    else:
        pbar.close()
        if not args.no_progress:
            pbar2.close()
            pbar2.write('\n')
        LOG.info('Download complete')
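
The "fast skip" blocks above avoid opening a VPK at all when the user's filter can never match a file inside it: a filter such as "pak01_dir.vpk:*.txt" is split on the first ':' and the left half is matched against the VPK path first. A minimal sketch of that prefilter (the file names are made up):

from fnmatch import fnmatch

name_filter = "pak01_dir.vpk:*.txt"
vpk_prefix = name_filter.split(':', 1)[0]

print(fnmatch("pak01_dir.vpk", vpk_prefix))     # True  -> worth scanning this VPK
print(fnmatch("textures_dir.vpk", vpk_prefix))  # False -> skip it entirely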
Example #51
0
def modify_page(request, boardname, bid):
  if re_search("\d\d", boardname) and int(boardname) != int(request.user.get_profile().get_pure_sid()):
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당 게시판에 접근할 권한이'})
  board = get_board(boardname)
  try:
    if not board.group_board.members.filter(id=request.user.id).exists():
      return render_to_response('noExist.html',{'user':request.user, 'target':'해당 소모임게시판에 접근할 권한이'})
  except ObjectDoesNotExist:
    pass
  if board is None:
    return render_to_response('noExist.html',{'user':request.user, 'target':'게시판이'})
  page = int(request.GET.get('page', 1))
  try:
    bulletin = Bulletin.bulletIns.get(id=bid)
  except ObjectDoesNotExist:
    return render_to_response('noExist.html',{'user':request.user, 'target':'해당글이'})

  if not bulletin.isMyBulletin(request.user):    # if this is not my post...
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
  if bulletin.deleted:
    return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))

  # Also check that it is still within the editable window: posts written more than a week ago cannot be modified.
  if (datetime.now() - bulletin.created).days >= settings.CAN_MODIFY_DAYS:
    # more than a week has passed
    return HttpResponseRedirect('/board/%s/read/%s/?page=%d' % (boardname, int(bid), page))
  try:
    relatedPosition = RelatedPosition.objects.get(bulletin=bulletin)
  except ObjectDoesNotExist:
    relatedPosition = None
  if request.method == 'POST':
    form = WriteAndModifyForm(data=request.POST, files=request.FILES, board=board)
    if form.is_valid():    # if the form is valid
      form.cleaned_data['content'] = utils.stripXSS(form.cleaned_data['content'])    # Prevent XSS
      form.cleaned_data['content'] = utils.CompleteHTML(form.cleaned_data['content']) # complete HTML
      #if form.cleaned_data['nametype'] == u'실명':
      #if form.cleaned_data['nametype'] == False:
      #  bulletin.isHiddenUser = False    # set anonymity to False
      #else:
      #  bulletin.isHiddenUser=True       # set anonymity to True
      bulletin.isHiddenUser=form.cleaned_data['nametype']
      bulletin.title=form.cleaned_data['title']
      bulletin.content=form.cleaned_data['content']
      #bulletin.secret=form.cleaned_data['secret']
      bulletin.notice=form.cleaned_data['notice']
      bulletin.save()
      if form.cleaned_data['position']:
        if relatedPosition:      # if one already exists, just update and save
          relatedPosition.title=form.cleaned_data['positionTitle']
          relatedPosition.position=form.cleaned_data['position']
          relatedPosition.save()
        else:            # otherwise create a new one
          RelatedPosition.objects.create(
            bulletin=bulletin,
            title=form.cleaned_data['positionTitle'],
            position=form.cleaned_data['position'],
          )
      else:      # the position was removed...
        if relatedPosition:        # but one is still registered
          relatedPosition.delete()  # so delete it without mercy
      if form.cleaned_data['file']:  # attached files
        for file in form.cleaned_data['file']:
          RelatedFile.objects.create(
            board=board,
            bulletin=bulletin,
            file=file,
            size=file.size,
          )
      if form.cleaned_data['gallery'] and board.name == "photo":    # if a gallery was uploaded
        if bulletin.gallery:        # delete any existing one first
          for photo in bulletin.gallery.photos.all():
            photo.delete()
          bulletin.gallery.delete()    # every photo inside has to be deleted as well...
        # create a gallery and upload into it
        bulletin.gallery = GalleryUpload(owner=request.user,
            zip_file=form.cleaned_data['gallery'],
            title=' >> '.join([board.title, form.cleaned_data['title']]),
            #title=form.cleaned_data['title'],
            is_public=True).save()
        bulletin.save()    # save
      if boardname == 'star':
        if Rate.objects.filter(bulletin=bulletin).exists(): # if one exists
          rate = Rate.objects.get(bulletin=bulletin)
          rate.rate = form.cleaned_data['starpoint']
          rate.save()
        else:
          Rate.objects.create(
            rate = form.cleaned_data['starpoint'],
            bulletin = bulletin
          )
      # prefix '-' if anonymous, '+' otherwise
      if bulletin.isHiddenUser:
        anomChar = '-'
      else:
        anomChar = '+'
      # create a Feed for everyone who bookmarked the post
      for like in bulletin.likes.all():
        user = like.user
        if user != request.user:
          if len(bulletin.title) > 20:
            additionalTitle = '%s...' % bulletin.title[:17]
          else:
            additionalTitle = bulletin.title[:20]
          Feed.objects.create(
            url="/board/%s/read/%d/?page=%d"%(boardname, int(bid), page),
            from_user=request.user,
            to_user=user,
            additional='%s%s'%(anomChar, additionalTitle),
            type=u'SM',
          )
      try:
        if board.group_board:
          # create a Feed for every member of the group
          for user in board.group_board.members.all():
            if user != request.user:
              Feed.objects.create(
                url="/board/%s/read/%s/"%(boardname, bid),
                from_user=request.user,
                to_user=user,
                additional=board.group_board.id,
                type=u'GM',
              )
          # refresh the group state (new)
          board.group_board.save()
      except ObjectDoesNotExist:
        pass
      board.save()  # refresh the board state as well
      return HttpResponseRedirect('/board/%s/read/%d/?page=%d'% (boardname, int(bid), page))
  else:
    if relatedPosition:    # if there is a stored position
      rTitle = relatedPosition.title
      rPos = relatedPosition.position
    else:      # no position
      rTitle = ''
      rPos = ''

    form = WriteAndModifyForm(board=board, data={
        'title':bulletin.title,
        'content':bulletin.content,
        'positionTitle':rTitle,
        'position':rPos,
        'nametype':bulletin.isHiddenUser,
        'notice':bulletin.notice,
        #'secret':bulletin.secret,
        })
  relatedFile = RelatedFile.objects.filter(bulletin=bulletin)
  for file in relatedFile:
    file.name = file.file.name.split('/')[-1]

  tpl = loader.get_template('board/write_and_modify.html')    # render using write_and_modify.html as the template
  ctx = RequestContext(request, {            # parameters can be passed in as a dictionary
    'board':board,
    'bulletin':bulletin,
    'form':form,
    'page':page,
    'type':'modify',
    'typeText':u'수정',
    'files':relatedFile,
    'boardname':boardname,
    'MAX_A_FILE_SIZE':settings.MAX_A_FILE_SIZE,
    'MAX_TOTAL_FILE_SIZE':settings.MAX_TOTAL_FILE_SIZE,
    })
  return HttpResponse(tpl.render(ctx))
Example #52
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        """ Impcap/Log Darwin policy """
        self.fields['darwin_policy'] = ModelChoiceField(
            label=_("Darwin policy"),
            queryset=DarwinPolicy.objects.all(),
            widget=Select(attrs={'class': 'form-control select2'}),
            required=False)
        """ Log forwarders """
        self.fields['log_forwarders'] = ModelMultipleChoiceField(
            label=_("Log forwarders"),
            queryset=LogOM.objects.all().only(*LogOM.str_attrs()),
            widget=SelectMultiple(attrs={'class': 'form-control select2'}),
            required=False)
        """ Log forwarders """
        self.fields['log_forwarders_parse_failure'] = ModelMultipleChoiceField(
            label=_("Log forwarders - parse failure"),
            queryset=LogOM.objects.all().only(*LogOM.str_attrs()),
            widget=SelectMultiple(attrs={'class': 'form-control select2'}),
            required=False)
        """ MMDP Reputation database IPv4 """
        # Defined here AND in model, to use queryset
        self.fields['logging_reputation_database_v4'] = ModelChoiceField(
            label=_("Rsyslog IPv4 reputation database"),
            # queryset=[(f.get('id'), str(f)) for f in Feed.objects.mongo_find({"filename": {"$regex": r"\.mmdb$"},  # MMDB database
            #                                   "label": {"$regex": r"^((?![iI][Pp][Vv]6).)*$"}},  # Excude IPv6
            #                                  {"filename": 1, "label": 1})],  # .only( label, filename )
            # queryset=Feed.objects.filter(filename__iregex="^((?![iI][Pp][Vv]6).)*\.mmdb$"),
            # queryset=Feed.objects.exclude(filename__iregex="^((?![iI][Pp][Vv]6).)*$").filter(filename__endswith=".mmdb"),
            queryset=ReputationContext.objects.filter(
                db_type="ipv4", filename__endswith=".mmdb").only(
                    *(ReputationContext.str_attrs() +
                      ['filename', 'db_type'])),
            widget=Select(attrs={'class': 'form-control select2'}),
            empty_label="No IPv4",
            required=False)
        """ MMDP Reputation database IPv6 """
        # Defined here AND in model, to use queryset
        self.fields['logging_reputation_database_v6'] = ModelChoiceField(
            label=_("Rsyslog IPv6 reputation database"),
            queryset=ReputationContext.objects.filter(
                db_type="ipv6",
                filename__endswith=".mmdb")  # MMDP database & IPv6
            .only(*(ReputationContext.str_attrs() + ['filename', 'db_type'])),
            widget=Select(attrs={'class': 'form-control select2'}),
            empty_label="No IPv6",
            required=False)
        self.fields['logging_geoip_database'] = ModelChoiceField(
            label=_("Rsyslog GeoIP database"),
            queryset=ReputationContext.objects.filter(db_type="GeoIP").only(
                *(ReputationContext.str_attrs() + ['filename', 'db_type'])),
            widget=Select(attrs={'class': 'form-control select2'}),
            empty_label="No GeoIP",
            required=False)

        self.fields['node'] = ModelChoiceField(
            label=_('Node'),
            queryset=Node.objects.all(),
            widget=Select(attrs={'class': 'form-control select2'}))

        # Remove the blank input generated by django
        for field_name in [
                'mode', 'ruleset', 'log_level', 'listening_mode',
                'compression_algos', 'log_forwarders', 'impcap_intf'
        ]:
            self.fields[field_name].empty_label = None
        self.fields['error_template'].empty_label = "No template"
        # Set required in POST data to False
        for field_name in [
                'log_condition', 'ruleset', 'log_level', 'listening_mode',
                'headers', 'custom_haproxy_conf', 'cache_total_max_size',
                'cache_max_age', 'compression_algos', 'compression_mime_types',
                'error_template', 'enable_logging_reputation', 'impcap_filter',
                'impcap_filter_type', 'impcap_intf', 'tags', 'timeout_client',
                'timeout_connect', 'timeout_keep_alive', 'parser_tag',
                'file_path', 'node'
        ]:
            self.fields[field_name].required = False
        """ Build choices of "ruleset" field with rsyslog jinja templates names """
        # read the entries
        try:
            with scandir(JINJA_RSYSLOG_PATH) as listOfEntries:
                for entry in listOfEntries:
                    if entry.is_dir():
                        m = re_search("rsyslog_ruleset_([\w-]+)", entry.name)
                        if m:
                            # Do NOT process haproxy - it's an internal log type
                            if m.group(1) in ("haproxy", "haproxy_tcp"):
                                continue
                            self.fields['ruleset'].widget.choices.append(
                                (m.group(1), m.group(1)))
        except Exception as e:
            logger.error(
                "Cannot build 'ruleset' choices. It seems that path '{}' cannot be found."
                .format(JINJA_RSYSLOG_PATH))
            logger.exception(e)

        # Set initial value of compression_algos field,
        #  convert space separated string into list
        if self.initial.get('compression_algos'):
            self.initial['compression_algos'] = self.initial.get(
                'compression_algos').split(' ')
        self.initial['tags'] = ','.join(
            self.initial.get('tags', []) or self.fields['tags'].initial)
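
The scandir loop above derives the "ruleset" choices from on-disk directory names, with re_search doing the extraction. A minimal sketch of that step (the directory names are made up):

from re import search as re_search

for dirname in ("rsyslog_ruleset_apache", "rsyslog_ruleset_haproxy", "templates"):
    m = re_search(r"rsyslog_ruleset_([\w-]+)", dirname)
    if m and m.group(1) not in ("haproxy", "haproxy_tcp"):
        print(m.group(1))  # only "apache" is printed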
Example #53
0
def cmd_depot_diff(args):
    try:
        with init_clients(args) as (_, _, manifests):
            targetdir = args.TARGETDIR
            fileindex = {}

            for manifest in manifests:
                LOG.debug("Scanning manifest: %r", manifest)
                for mfile in manifest.iter_files():
                    if not mfile.is_file:
                        continue

                    if args.name and not fnmatch(mfile.filename_raw,
                                                 args.name):
                        continue
                    if args.regex and not re_search(args.regex,
                                                    mfile.filename_raw):
                        continue

                    if args.show_extra:
                        fileindex[mfile.filename] = mfile.file_mapping

                    if args.hide_missing and args.hide_mismatch:
                        continue

                    full_filepath = os.path.join(targetdir, mfile.filename)
                    if os.path.isfile(full_filepath):
                        # do mismatch, checksum checking
                        size = os.path.getsize(full_filepath)

                        if size != mfile.size:
                            print("Mismatch (size differ):", full_filepath)
                            continue

                        # valve sets the checksum for empty files to all nulls
                        if size == 0:
                            checksum = b'\x00' * 20  # 20 null bytes
                        else:
                            checksum = calc_sha1_for_file(full_filepath)

                        if checksum != mfile.file_mapping.sha_content:
                            print("Mismatch (checksum differ):", full_filepath)

                    elif not args.hide_missing:
                        print("Missing file:", full_filepath)

            # walk file system and show files not in manifest(s)
            if args.show_extra:
                for cdir, _, files in os.walk(targetdir):
                    for filename in files:
                        filepath = os.path.join(cdir, filename)
                        rel_filepath = os.path.relpath(filepath, targetdir)

                        if rel_filepath.lower() not in fileindex:
                            print("Not in manifest:", filepath)

    except KeyboardInterrupt:
        return 1  # error
    except SteamError as exp:
        LOG.error(exp)
        return 1  # error
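
The size == 0 special case above exists because, per the comment, the manifest stores 20 null bytes for empty files rather than a real digest; hashing empty content would therefore never match it. A minimal sketch showing why:

import hashlib

print(hashlib.sha1(b"").hexdigest())  # da39a3ee5e6b4b0d3255bfef95601890afd80709
print((b"\x00" * 20).hex())           # 40 zeros -- what the manifest stores instead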
Example #54
0
    def __init__(self, master: O[Widget] = None, scrollbars: str = 'SE', dohide: bool = True,
                 padding: U[int, tuple[int], list[int]] = (3, 0, 0, 3), doupdate: bool = True,
                 scrollspeed: int = 2, **kwargs):
        """\
        Parameters
        ----------
        master : tkWidget, optional (default is tkTk)
            The parent widget

        scrollbars : str, optional (default is "SE")
            Where to put the scrollbars

        padding : int | sequence[int] where len=(2 or 4), optional (default is (3, 0, 0, 3))
            Padding around the scroll_canvas widget

        dohide : bool, optional (default is True)
            Whether to hide the scrollbars when not needed

        doupdate : bool, optional (default is True)
            Whether to automatically redraw the widget whenever it's resized

        scrollspeed : int, optional (default is 2)
            The number of lines to scroll by. 0 disables mousewheel scrolling

        **kwargs : keyword arguments, optional
            Any additional tkFrame parameters
        """
        self.__validateVars(padding, scrollbars)
        # set var defaults if not specified
        kwargs.update(bd=kwargs.pop('borderwidth', kwargs.pop('bd', 2)),
                      relief=kwargs.pop('relief', 'ridge'),
                      width=kwargs.pop('width', 300),
                      height=kwargs.pop('height', 200))
        # set initial values
        self.__BG = (kwargs.get('background') or
                     kwargs.get('bg', 'SystemButtonFace'))
        self.__CURSOR = kwargs.get('cursor', '')
        self.dohide = dohide
        self.doupdate = doupdate
        self.scrollspeed = scrollspeed
        self.scrollbar_h = None
        self.scrollbar_v = None
        self.__showVScroll = False
        self.__showHScroll = False
        self.__allChildren = set()
        # create widget
        self.__createContainer(master, kwargs)
        self.__sfc = f'{self.container.winfo_id()}_children'
        self.__createScrollFrame()
        self.redraw()
        if doupdate:
            self.container.bind(sequence='<Configure>',
                                func=self.redraw)
        # Pass geometry methods to container
        def meths(cls): return vars(cls).keys()
        all_frame_methods = meths(Frame)
        all_geo_methods = meths(Pack) | meths(Grid) | meths(Place)
        geo_methods = all_geo_methods.difference(all_frame_methods)
        for method in geo_methods:
            if not re_search(r'^_|config|slaves|propagate|location', method):
                setattr(self, method, getattr(self.container, method))
Example #55
0
def _sqlite_regexp(pattern, string):
    if pattern is None or string is None:
        return None
    if not isinstance(string, str):
        string = str(string)
    return bool(re_search(pattern, string))
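
A helper with this shape is typically registered as SQLite's REGEXP function, since the sqlite3 module ships the operator without an implementation. A minimal sketch (assuming the function above is importable as _sqlite_regexp):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.create_function("REGEXP", 2, _sqlite_regexp)
# SQLite rewrites "x REGEXP y" as regexp(y, x), so the pattern comes first
print(conn.execute("SELECT 'abc123' REGEXP '[0-9]+'").fetchone())  # (1,)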
Example #56
0
from re import search as re_search

import getopt

try:
    opts, args = getopt.gnu_getopt(sys.argv[1:], "hvd:p:a:r:l:t:s:i:", [
        "help", "version", "indir=", "packet-size=", "algorithm=",
        "taxannot-resdir=", "local-fasta-to-bd=", "threads=", "accession=",
        "use-index="
    ])
except getopt.GetoptError as gerr:
    print(str(gerr))
    platf_depend_exit(2)
# end try

def is_fq_or_fa(f):
    return re_search(r".*\.(m)?f(ast)?(a|q)(\.gz)?$", f) is not None

# Default values:
fq_fa_list = list()  # list of paths to files meant to be processed
indir_path = None  # path to `-d` directory
packet_size = 100
blast_algorithm = "megaBlast"
tax_annot_res_dir = "barapost_result"  # directory with taxonomic annotation results
your_own_fasta_lst = list()  # list of user's fasta files to be included in the database
accs_to_download = list()  # list of accessions of GenBank records to download
n_thr = 1  # number of threads
use_index = "true"

# Add positional arguments to fq_fa_list
for arg in args:
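
The snippet cuts off above, but the is_fq_or_fa test it defines is self-contained: it accepts FASTA/FASTQ file names, optionally gzipped. A minimal sketch of what it matches (the file names are made up):

for fname in ("reads.fastq.gz", "genome.mfa", "contigs.fasta", "report.txt"):
    print(fname, is_fq_or_fa(fname))
# reads.fastq.gz True / genome.mfa True / contigs.fasta True / report.txt False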
Example #57
0
    def qc_flag_block_plot(self,
                           data_field=None,
                           dsname=None,
                           subplot_index=(0, ),
                           time_rng=None,
                           assessment_color=None,
                           **kwargs):
        """
        Create a time series plot of embedded quality control values
        using broken barh plotting.

        Parameters
        ----------
        data_field : str
            Name of data field in the object to plot corresponding quality control.
        dsname : None or str
            If there is more than one datastream in the display object the
            name of the datastream needs to be specified. If set to None and
            there is only one datastream ACT will use the sole datastream
            in the object.
        subplot_index : 1 or 2D tuple, list, or array
            The index of the subplot to set the x range of.
        time_rng : tuple or list
            List or tuple with (min, max) values to set the x-axis range limits.
        assessment_color : dictionary
            Dictionary lookup to override the default assessment-to-color mapping. Make sure
            the assessment word is set with the correct case.
        **kwargs : keyword arguments
            The keyword arguments for :func:`plt.broken_barh`.
        """

        # Color to plot associated with assessment.
        color_lookup = {
            'Bad': 'red',
            'Incorrect': 'red',
            'Indeterminate': 'orange',
            'Suspect': 'orange',
            'Missing': 'darkgray'
        }

        if assessment_color is not None:
            for asses, color in assessment_color.items():
                color_lookup[asses] = color
                if asses == 'Incorrect':
                    color_lookup['Bad'] = color
                if asses == 'Suspect':
                    color_lookup['Indeterminate'] = color

        # Set up list of test names to use for missing values
        missing_val_long_names = [
            'Value.* equal to missing_value*', 'Value.* set to missing_value*'
        ]

        if dsname is None and len(self._arm.keys()) > 1:
            raise ValueError(("You must choose a datastream when there are 2 "
                              "or more datasets in the TimeSeriesDisplay "
                              "object."))
        elif dsname is None:
            dsname = list(self._arm.keys())[0]

        # Set up or get current plot figure
        if self.fig is None:
            self.fig = plt.figure()

        # Set up or get current axes
        if self.axes is None:
            self.axes = np.array([plt.axes()])
            self.fig.add_axes(self.axes[0])

        ax = self.axes[subplot_index]

        # Set X Limit - We want the same time axes for all subplots
        data = self._arm[dsname][data_field]
        dim = list(self._arm[dsname][data_field].dims)
        xdata = self._arm[dsname][dim[0]]

        # Get data and attributes
        qc_data_field = self._arm[dsname].qcfilter.check_for_ancillary_qc(
            data_field)
        flag_masks = self._arm[dsname][qc_data_field].attrs['flag_masks']
        flag_meanings = self._arm[dsname][qc_data_field].attrs['flag_meanings']
        flag_assessments = self._arm[dsname][qc_data_field].attrs[
            'flag_assessments']

        # Get time ranges for green blocks
        time_delta = determine_time_delta(xdata.values)
        barh_list_green = reduce_time_ranges(xdata.values,
                                             time_delta=time_delta,
                                             broken_barh=True)

        test_nums = []
        for ii, assess in enumerate(flag_assessments):
            # Plot green data first.
            ax.broken_barh(barh_list_green, (ii, ii + 1), facecolors='green')
            # Get test number from flag_mask bitpacked number
            test_nums.append(parse_bit(flag_masks[ii]))
            # Get masked array data to use mask for finding if/where test is set
            data = self._arm[dsname].qcfilter.get_masked_data(
                data_field, rm_tests=test_nums[-1])
            if np.any(data.mask):
                # Get time ranges from time and masked data
                barh_list = reduce_time_ranges(xdata.values[data.mask],
                                               time_delta=time_delta,
                                               broken_barh=True)
                # Check if the bit set is indicating missing data. If so change
                # to different plotting color than what is in flag_assessments.
                for val in missing_val_long_names:
                    if re_search(val, flag_meanings[ii]):
                        assess = "Missing"
                        break
                # Lay down blocks of tripped tests using correct color
                ax.broken_barh(barh_list, (ii, ii + 1),
                               facecolors=color_lookup[assess])
            # Add test description to plot.
            ax.text(xdata.values[0], ii + 0.5, flag_meanings[ii], va='center')

        # Set background to gray indicating not available data
        ax.set_facecolor('dimgray')
        # Change y ticks to test number
        plt.yticks([ii + 0.5 for ii in range(0, len(test_nums))],
                   labels=['Test ' + str(ii[0]) for ii in test_nums])
        # Set ylimit to number of tests plotted
        ax.set_ylim(0, len(flag_assessments))
        # Set X Limit - We want the same time axes for all subplots
        if not hasattr(self, 'time_rng'):
            if time_rng is not None:
                self.time_rng = list(time_rng)
            else:
                self.time_rng = [xdata.min().values, xdata.max().values]

        self.set_xrng(self.time_rng, subplot_index)

        # Get X format - We want the same time axes for all subplots
        if hasattr(self, 'time_fmt'):
            ax.xaxis.set_major_formatter(self.time_fmt)
        else:
            # Set X Format
            if len(subplot_index) == 1:
                days = (self.xrng[subplot_index, 1] -
                        self.xrng[subplot_index, 0])
            else:
                days = (self.xrng[subplot_index[0], subplot_index[1], 1] -
                        self.xrng[subplot_index[0], subplot_index[1], 0])

            myFmt = common.get_date_format(days)
            ax.xaxis.set_major_formatter(myFmt)
            self.time_fmt = myFmt

        return self.axes[subplot_index]
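
The recoloring of missing data above hinges on matching the flag_meanings text against the missing-value patterns. A minimal sketch of that re_search step (the meaning string is made up but follows the convention named in the list above):

from re import search as re_search

meaning = "Value is equal to missing_value."
patterns = ['Value.* equal to missing_value*', 'Value.* set to missing_value*']
print(any(re_search(p, meaning) for p in patterns))  # True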
Example #58
0
def cmd_depot_list(args):
    def print_file_info(filepath, info=None):
        # filepath filtering
        if args.name and not fnmatch(filepath, args.name):
            return
        if args.regex and not re_search(args.regex, filepath):
            return

        # output
        if info:
            print("{} - {}".format(filepath, info))
        else:
            print(filepath)

    try:
        with init_clients(args) as (_, _, manifests):
            fileindex = ManifestFileIndex(manifests)

            # pre-index vpk file to speed up lookups
            if args.vpk:
                fileindex.index('*.vpk')

            for manifest in manifests:
                if manifest.filenames_encrypted:
                    LOG.error(
                        "Manifest %s (depot %s) filenames are encrypted.",
                        manifest.gid, manifest.depot_id)
                    continue

                for mapping in manifest.payload.mappings:
                    # ignore symlinks and directories
                    if mapping.linktarget or mapping.flags & EDepotFileFlag.Directory:
                        continue

                    filepath = mapping.filename.rstrip('\x00 \n\t')

                    # filepath filtering
                    if ((not args.name and not args.regex)
                            or (args.name and fnmatch(filepath, args.name)) or
                        (args.regex and re_search(args.regex, filepath))):

                        # print out for manifest file
                        if not args.long:
                            print(filepath)
                        else:
                            print("{} - size:{:,d} sha1:{}".format(
                                filepath,
                                mapping.size,
                                mapping.sha_content.hex(),
                            ))

                    # list files inside vpk
                    if args.vpk and filepath.endswith('.vpk'):
                        # fast skip VPKs that can't possibly match
                        if args.name and ':' in args.name:
                            pre = args.name.split(':', 1)[0]
                            if not fnmatch(filepath, pre):
                                continue
                        if args.regex and ':' in args.regex:
                            pre = args.regex.split(':', 1)[0]
                            if not re_search(pre + '$', filepath):
                                continue

                        # scan VPKs, but skip data only ones
                        if filepath.endswith('_dir.vpk') or not re_search(
                                r"_\d+\.vpk$", filepath):
                            LOG.debug("Scanning VPK file: %s", filepath)

                            try:
                                fvpk = fileindex.get_vpk(filepath)
                            except ValueError as exp:
                                LOG.error("VPK read error: %s", str(exp))
                            else:
                                for vpkfile_path, (
                                        _, crc32, _, _, _,
                                        size) in fvpk.c_iter_index():
                                    complete_path = "{}:{}".format(
                                        filepath, vpkfile_path)

                                    if ((not args.name and not args.regex) or
                                        (args.name
                                         and fnmatch(complete_path, args.name))
                                            or (args.regex and re_search(
                                                args.regex, complete_path))):

                                        if args.long:
                                            print("{} - size:{:,d} crc32:{}".
                                                  format(
                                                      complete_path,
                                                      size,
                                                      crc32,
                                                  ))
                                        else:
                                            print(complete_path)

    except SteamError as exp:
        LOG.error(str(exp))
        return 1  # error
Example #59
0
 def clean_psk_key(self):
     field_value = self.cleaned_data.get('psk_key', "")
     if field_value and not re_search("^[0-9a-fA-F]{32,}$", field_value):
         raise ValidationError("PSK key must be at least 32 hex digits.")
     return field_value
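
A minimal check of the PSK validation pattern above (the key values are made up):

from re import search as re_search

for key in ("deadbeef" * 4, "deadbeef", "not-hex-at-all"):
    print(key, bool(re_search("^[0-9a-fA-F]{32,}$", key)))
# 32 hex digits pass; short or non-hex values fail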
Example #60
0
def is_ip(string):
    """ This dummy function returns True is the string is probably an IP """
    return re_search("[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+", string) is not None