def _sendAndRecv(self,sendData,timeout=1000):
     answer = sendData
     self.flush()
     self.ser.write(answer)
     
     self._log("send[%s]" % codecs.encode(answer,'hex'))
     intime = datetime.today()
     outtime = intime + timedelta(milliseconds=timeout)
     total = 0
     full_data = ''
     while True:
         l = self.ser.inWaiting()
         if l == 0: 
             timespan = datetime.today()
             if timespan > outtime:
                 return False,''
             continue
         answer=self.ser.read(l)
         total += l
         full_data += answer
         if total >= 2:
             datalen = ord(full_data[1])
             if datalen <= total:
                 break
            
     self._log("recv[%s]" % codecs.encode(full_data,'hex'))
     return True,full_data
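
A short sketch (not from the original project) of the framing this read loop assumes: byte 1 of the reply carries the total frame length, and the hex codec is only used for logging. Python 3 syntax; the snippet above is Python 2 and uses ord() for the same check.

import codecs

frame = b'\x02\x06\x90\xb0\x00\x03'   # illustrative reply; byte 1 (0x06) equals the frame length
assert frame[1] == len(frame)
print(codecs.encode(frame, 'hex'))    # b'020690b00003', the same form the _log() calls record
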
Example #2
 def name_get(self, cr, uid, ids, context=None):
             
     if context is None:
         context = {}
             
     ids = isinstance(ids, (int, long)) and [ids] or ids        
     if not len(ids):
         return []
     
     '''
     res = [(r['id'], ''.join([a for a in r['name'] if a != '0'])+'::'+(
         self.browse(cr, uid, r['id'], context).import_id.name or '')) \
         for r in self.read(cr, uid, ids, ['name', ], context)]
     return res
     '''
     
     # Check the fields used to build the display name
     res = []
     for r in self.read(cr, uid, ids, ['name', ], context):
         import_name = self.browse(cr, uid, r['id'], context).import_id.name
         if import_name is None:
             import_name = ''
         if re.match(r'^\d', r['name']):
             n_pack = re.split(r'^0*', r['name'])
             r_pack = n_pack[1]
         else:
             r_pack = r['name']
         name = codecs.encode(r_pack,'utf8')+'::'+codecs.encode(import_name,'utf8')
         res.append((r['id'], name))
         
     return res
Example #3
 def __send(self, msg, retry, waitForAnswer=True):
     if retry <= 0:
         raise RuntimeError('No more retries for %r on %s' % (msg, self.host))
     if not self.__co and self.lazy:
         self.connect()
     elif not self.__co:
         raise RuntimeError('No connection available')
     try:
         self.__co.sendall(msg)
         if not waitForAnswer:
             return
         answer, size = b'', 0
         while True:
             answer += self.__co.recv(4096)
             if size == 0 and len(answer) >= 4:
                 size = int(codecs.encode(answer[:4][::-1], 'hex'), 16)
             if len(answer) >= size and size != 0:  # this part is DEP specific
                 break
             readsel, _, _ = select([self.__co], [], [], 0.5)
             if len(readsel) == 0:
                 break
     except socket.error as ex:  # pragma: no cover
         print('%r' % ex)
         self.disconnect()
         self.connect()
         return self.__send(msg, retry - 1, waitForAnswer)
     return codecs.encode(answer, 'hex').decode().upper()
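
The 4-byte prefix above is a little-endian length field; as a sanity check (standalone Python 3 sketch, illustrative values), the codecs round-trip agrees with struct and int.from_bytes:

import codecs
import struct

header = b'\x10\x00\x00\x00'   # a frame whose length field says 16
size = int(codecs.encode(header[:4][::-1], 'hex'), 16)
assert size == struct.unpack('<I', header)[0] == int.from_bytes(header, 'little') == 16
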
Example #4
def get_file_url(feed_entry_url):
    url = urlparse(feed_entry_url)
    feed_entry_url_path = urllib.parse.quote(codecs.encode(url.path,'utf-8'))
    url_template = '{0}://{1}{2}'
    with urllib.request.urlopen(url_template.format(url.scheme, url.netloc, feed_entry_url_path)) as f:
        response = f.read()
    html_string = str(response, 'utf-8')
    
    parser = etree.HTMLParser()
    
    tree = etree.parse(StringIO(html_string), parser)
    downloadLinks = tree.xpath('.//div[@class="linkList download"]/a/@href')
    if len(downloadLinks) == 0:
        intern_link = tree.xpath('.//div[@class="linkList intern"]/a/@href')[0]
        feed_entry_url_path = urllib.parse.quote(codecs.encode(intern_link,'utf-8'))
        with urllib.request.urlopen(url_template.format(url.scheme, url.netloc, feed_entry_url_path)) as f:
            response = f.read()
        html_string = str(response, 'utf-8')
        parser = etree.HTMLParser()
        tree = etree.parse(StringIO(html_string), parser)
        file_url = tree.xpath('.//div[@class="linkList download"]/a/@href')[0]
    else:
        file_url = downloadLinks[0]
    
    return url_template.format(url.scheme, url.netloc, file_url)
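
A note on the quoting step above (Python 3): urllib.parse.quote already encodes str input as UTF-8, so pre-encoding the path with codecs yields the same result. A quick check:

import codecs
import urllib.parse

path = '/straße/ü.html'
assert urllib.parse.quote(codecs.encode(path, 'utf-8')) == urllib.parse.quote(path)
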
Example #5
    def sendTo(self, hosts=None, default_port=1000, module=0, msg=None):
        """
        Send the message to the DEP.

        If the argument 'hosts' is an IP or a list of IPs, it iterates over them until it gets an answer.
        If the argument 'hosts' is an instance of DEPMessage, it uses the hosts known to that instance.
        Opens a connection to the DEP, sends the message and returns the answer
        in a synchronous manner.
        """
        if msg:
            self.message = msg
        self.module = module
        telnet = Telnet()
        # t.set_debuglevel(100)
        if isinstance(hosts, str):
            hosts = (hosts,)
        elif isinstance(hosts, DEPMessage):
            self._hosts = hosts._hosts
            hosts = self._hosts.keys()

        for host in hosts or self.hosts:
            try:
                if host in self._hosts and self._hosts[host]:
                    telnet = self._hosts[host]
                else:
                    logger.info('%s not active in %r', host, self._hosts)
                    if ':' in host:
                        telnet.open(*host.split(':'))
                    else:
                        telnet.open(host, default_port)
                    self._hosts[host] = telnet
                sock = telnet.get_socket()
                msg = self._build()
                bytessent = sock.send(msg)
                logger.debug('%d chars sent', bytessent)
                answer, size = b'', None
                while True:
                    answer += sock.recv(4096)
                    if not size and len(answer) >= 4:
                        size = int(codecs.encode(answer[:4][::-1], 'hex'), 16)
                    if size and len(answer) >= size:
                        break
                    readsel, _, _ = select([sock], [], [], 0.5)
                    if len(readsel) == 0:
                        answer += telnet.read_very_lazy()
                        break
                break
            except socket.error:
                telnet.close()
                self._hosts[host] = None
                logger.exception('Socket issue with %s', host)
                continue
        else:
            raise ValueError('No dep available in the list : ' +
                             str(hosts or self._hosts))
        self.current_answer = codecs.encode(answer, 'hex').decode().upper()
        if LOG_DEPCALLS:  # pragma: no cover
            with open('result.txt', 'ab') as out:
                out.write(b'>>' + codecs.encode(msg[13:], 'hex').upper() + b':' + self.current_answer.encode().upper() + b'\n')
        return self.current_answer
Example #6
File: app.py Project: chfoo/gofart
    def fartenize_url(self, text, host_scheme, hostname, rot13=False):
        if len(text) > 1024:
            raise ValueError('Too long')

        if text.startswith('//'):
            text = 'https:{}'.format(text)
        elif text and '://' not in text:
            text = 'https://{}'.format(text)

        scheme, sep, link = text.partition('://')

        if not sep:
            raise ValueError('No separator')

        scheme = scheme.lower()

        if scheme not in ('http', 'https'):
            raise ValueError('Bad scheme')

        if '\n' in link or '\r' in link:
            raise ValueError('Newline found')

        return '{}://{}/{}/{}'.format(
            host_scheme, hostname,
            codecs.encode(scheme, 'rot_13') if rot13 else scheme,
            codecs.encode(link, 'rot_13') if rot13 else link
        )
Example #7
    def _op_return_hex(self, op_return):
        try:
            hex_op_return = codecs.encode(op_return, 'hex')
        except TypeError:
            hex_op_return = codecs.encode(op_return.encode('utf-8'), 'hex')

        return "6a%02x%s" % (len(op_return), hex_op_return.decode('utf-8'))  # %02x keeps the length prefix a full byte
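
A standalone sketch of the same OP_RETURN framing (op_return_hex here is a hypothetical helper, not part of the project above): the opcode 0x6a, a one-byte push length, then the hex payload.

import codecs

def op_return_hex(payload):
    # %02x keeps the push length a full byte (two hex digits)
    return "6a%02x%s" % (len(payload), codecs.encode(payload, 'hex').decode('ascii'))

print(op_return_hex(b'hi'))   # 6a026869
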
Example #8
def save_entry_slice(entry, source_slice, sha256sum):
    """Save slice of the source file for an entry.

    Args:
        entry: An entry.
        source_slice: The lines that the entry should be replaced with.
        sha256sum: The sha256sum of the current lines of the entry.

    Returns:
        The `sha256sum` of the new lines of the entry.

    Raises:
        FavaAPIException: If the file at `path` is not one of the
            source files.

    """

    with open(entry.meta['filename'], 'r') as file:
        lines = file.readlines()

    first_entry_line = entry.meta['lineno'] - 1
    entry_lines = find_entry_lines(lines, first_entry_line)
    entry_source = ''.join(entry_lines).rstrip('\n')
    original_sha256sum = sha256(codecs.encode(entry_source)).hexdigest()
    if original_sha256sum != sha256sum:
        raise FavaAPIException('The file changed externally.')

    lines = (lines[:first_entry_line]
             + [source_slice + '\n']
             + lines[first_entry_line + len(entry_lines):])
    with open(entry.meta['filename'], "w") as file:
        file.writelines(lines)

    return sha256(codecs.encode(source_slice)).hexdigest()
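
The digests above rely on codecs.encode() defaulting to UTF-8 when no encoding is given (Python 3); a minimal check of that assumption:

import codecs
from hashlib import sha256

text = '2020-01-01 open Assets:Cash'
assert codecs.encode(text) == text.encode('utf-8')
assert sha256(codecs.encode(text)).hexdigest() == sha256(text.encode('utf-8')).hexdigest()
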
Example #9
	def encode(self, input, errors='strict'):
		assert errors == 'strict'
		#return codecs.encode(input, self.base_encoding, self.name), len(input)

		# The above line could totally be all we needed, relying on the error
		# handling to replace the unencodable Unicode characters with our extended
		# byte sequences.
		#
		# However, there seems to be a design bug in Python (probably intentional):
		# the error handler for encoding is supposed to return a **Unicode** character,
		# that then needs to be encodable itself...  Ugh.
		#
		# So we implement what codecs.encode() should have been doing: which is expect
		# error handler to return bytes() to be added to the output.
		#
		# This seems to have been fixed in Python 3.3.  We should try using that and
		# use fallback only if that failed.
		# https://docs.python.org/3.3/library/codecs.html#codecs.register_error

		length = len(input)
		out = b''
		while input:
			try:
				part = codecs.encode(input, self.base_encoding)
				out += part
				input = '' # All converted
			except UnicodeEncodeError as e:
				# Convert the correct part
				out += codecs.encode(input[:e.start], self.base_encoding)
				replacement, pos = self.error(e)
				out += replacement
				input = input[pos:]
		return out, length
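
A minimal sketch of the Python 3.3+ behaviour the comments above refer to: an encode error handler registered with codecs.register_error may return bytes, which are copied straight into the output. The handler name 'u_escape' is illustrative.

import codecs

def u_escape(exc):
    bad = exc.object[exc.start:exc.end]
    return b''.join(('U+%04X' % ord(ch)).encode('ascii') for ch in bad), exc.end

codecs.register_error('u_escape', u_escape)
print(codecs.encode('naïve', 'ascii', 'u_escape'))   # b'naU+00EFve'
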
Example #10
 def ask_target(self, noise, stats):
     r = self.pick_random()
     while not r[0] in stats.get_wrongs():
         r = self.pick_random()
     q = r[0]
     a = r[1]
     pa = [a]
     while len(pa) < noise:
         pa.append(self.pick_random_but(pa)[1])
     random.shuffle(pa)
     print(q)
     for x in range(len(pa)):
         try:
             print('{}: {}'.format(x+1, codecs.encode(pa[x], sys.stdout.encoding)))
         except UnicodeEncodeError:
             print('{}: {}'.format(x+1, codecs.encode(pa[x], 'cp1252')))
     user_input = None
     while not isinstance(user_input, int) or user_input <= 0 or user_input > len(pa):
         user_input = int(input('Select the correct answer: '))
     if pa[user_input-1] == a:
         print('nice !\n')
         stats.correct(q)
     else:
         print('nope ...\n')
         stats.wrong(q)
Example #11
def stackconvertSTR( string, win=False):
    db = []
    if len(string) == 1:
        string = codecs.encode(str.encode(string), 'hex')
        string = string.decode('utf-8')
        return r"\x6a"+r"\x"+string

    if "/" in string:
        if len(string) % 4 == 0:
            string = string
        elif  len(string) % 4 == 1:
            string = filler( string, 4)
        elif len(string)	% 4 == 2:
            string = filler( string, 3)
        elif len(string) % 4 == 3:
            string = filler( string, 2)
        for x in range(0,len(string),4):
            db.append(splitter(string[x:x+4]))
        return "".join(db[::-1])
        #return "".join(db)

    #Linux_x86
    #68 PUSH DWORD
    #6668 PUSH WORD
    #6A PUSH BYTE
    if len(string) == 4:
        first = codecs.encode(str.encode(string[::-1]), 'hex')
        stack = first.decode('utf-8')
        data = findall("..?", stack)
        return "\\x68\\x"+"\\x".join(data)


    elif len(string) % 4 == 0:
        for x in range(0,len(string),4):
            db.append(splitter(string[x:x+4]))
        if win == True:
            return "".join(db[::-1]) #Windows
        else:
            return "".join(db) #Unix,Linux etc..

    elif 2 < len(string) < 4:
        first = codecs.encode(str.encode(string[::-1]), 'hex')
        first = first.decode('utf-8')
        second = findall("..?", first)[::-1]
        for x in second:
            db.append("\\x"+x)
        return "\\x66\\x68"+"".join(db)


    else:
        db = []
        for x in range(0,len(string),4):
            if len(string[x:x+4]) == 4:
                db.append(splitter(string[x:x+4]))
            else:
                db.append(splitter(string[x:x+4], "WordTime"))
        if win == True:
            return "".join(db[::-1]) #Windows
        else:
            return "".join(db) #Unix,Linux etc..)
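
A self-contained sketch of the exact-four-byte branch above (the helpers filler() and splitter() used elsewhere in the function are not shown): reverse the chunk, hex-encode it, and prefix the x86 PUSH imm32 opcode 0x68.

import codecs
from re import findall

def push_dword(chunk):
    # chunk must be exactly 4 ASCII characters
    stack = codecs.encode(chunk[::-1].encode('utf-8'), 'hex').decode('utf-8')
    return "\\x68\\x" + "\\x".join(findall("..?", stack))

print(push_dword("ABCD"))   # \x68\x44\x43\x42\x41
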
Example #12
    def __init__(self, hawk_session=None, id=None, key=None, algorithm='sha256',
                 credentials=None, server_url=None, _timestamp=None):
        if credentials is not None:
            raise AttributeError("The 'credentials' param has been removed. "
                                 "Pass 'id' and 'key' instead, or '**credentials_dict'.")

        if (hawk_session and (id or key)
                or not hawk_session and not (id and key)):
            raise AttributeError("You should pass either 'hawk_session' "
                                 "or both 'id' and 'key'.")

        if hawk_session:
            try:
                hawk_session = codecs.decode(hawk_session, 'hex_codec')
            except binascii.Error as e:
                raise TypeError(e)
            keyInfo = 'identity.mozilla.com/picl/v1/sessionToken'
            keyMaterial = HKDF(hawk_session, "", keyInfo, 32*2)
            id = codecs.encode(keyMaterial[:32], "hex_codec")
            key = codecs.encode(keyMaterial[32:64], "hex_codec")

        self.credentials = {
            'id': id,
            'key': key,
            'algorithm': algorithm
        }
        self._timestamp = _timestamp
        self.host = urlparse(server_url).netloc if server_url else None
Example #13
def WinExec( command):
	from re import findall
	fill =  "31c9b957696e45eb0431c9eb0031c"
	fill += "031db31d231ff31f6648b7b308b7f0"
	fill += "c8b7f1c8b47088b77208b3f807e0c3"
	fill += "375f289c703783c8b577801c28b7a2"
	fill += "001c789dd81f957696e45753b8b34a"
	fill += "f01c645390e75f68b7a2401c7668b2"
	fill += "c6f8b7a1c01c78b7caffc01c789d9b1ff53e2fd"
	if len(command) == 4:
		stack = "%s" % (codecs.encode(command, 'hex'))
		data = findall("..?", stack)				
		fill += "68"+"".join(data)				
	else:											
		if len(command)%4 == 3:										
			padd = "\x20"							
		elif len(command)%4 == 2:				
			padd = "\x20"*2							
		elif len(command)%4 == 1:					
			padd = "\x20"*3							
		else:
			padd = ""
		command = command + padd
		fixmesempai = findall('....?', command)
		for x in fixmesempai[::-1]:
			first = codecs.encode(x[::-1].encode('utf-8'), 'hex').decode('utf-8')
			second = findall("..?", first)[::-1]
			fill += "68"+"".join(second)
	fill += "89e2415152ffd7e886ffffff8b34af0"
	fill += "1c645813e4578697475f2817e045072"
	fill += "6f6375e98b7a2401c7668b2c6f8b7a1c"
	fill += "01c78b7caffc01c731c951ffd7"
	return "\\x"+"\\x".join(findall("..?", fill))
Example #14
    def _test_round_trip(self, tab, cls, convert_int):
        t = Table(
            tab,
            MetaData(),
            Column("data", String(50)),
            Column("rv", cls(convert_int=convert_int)),
        )

        with testing.db.connect() as conn:
            conn.execute(t.insert().values(data="foo"))
            last_ts_1 = conn.scalar("SELECT @@DBTS")

            if convert_int:
                last_ts_1 = int(codecs.encode(last_ts_1, "hex"), 16)

            eq_(conn.scalar(select([t.c.rv])), last_ts_1)

            conn.execute(
                t.update().values(data="bar").where(t.c.data == "foo")
            )
            last_ts_2 = conn.scalar("SELECT @@DBTS")
            if convert_int:
                last_ts_2 = int(codecs.encode(last_ts_2, "hex"), 16)

            eq_(conn.scalar(select([t.c.rv])), last_ts_2)
Example #15
def parse_submission_file(directory, hours, assas, interesting_features):
    sub_df = pd.read_csv(directory + "submission.txt", sep="\t", parse_dates=[0], date_parser=data_date_parser)

    to_predict = {}

    for assa in assas:
        print assa
        with open(directory + "pickled_dataframes/" + assa + ".p") as pf:
            data_df = pickle.load(pf)

        #        maxtime = numpy.datetime64(pd.datetime(2012, 01, 03))
        #
        #        data_df=data_df[data_df.index.values < numpy.datetime64(maxtime)]
        #
        #        data_df.info()
        temp_sub = sub_df[sub_df.loc[:, "ASS_ASSIGNMENT"] == codecs.encode(assa, "utf_8")]
        #        print temp_sub
        #        print data_df
        #        data_df.info()
        #        print pd.datetime(2011,01,31,23)
        #        print data_df[pd.datetime(2011,01,31,23)]
        #        print numpy.datetime64(pd.datetime(2011,01,31,23))
        #        print data_df[numpy.datetime64(pd.datetime(2011,01,31,23))]
        #        temp_sub.info()
        x_list = getKeys(temp_sub["DATE"].tolist(), data_df, hours, interesting_features)

        to_predict[codecs.encode(assa, "utf_8")] = x_list

        with open(directory + "test_obs_dict_bis.p", "w") as fp:
            pickle.dump(to_predict, fp)

    return to_predict
Example #16
 def test(self):
     #addr2 = addr_str[4:] + addr_str[2:4] + addr_str[0:2]
     cmd = "800590B0010000"
     answer = codecs.decode(cmd,'hex')
     self.flush()
     time.sleep(1)
     self.ser.write(answer)
     
     total = 0
     full_data = ''
     while True:
         print "waiting ..."
         l = self.ser.inWaiting()
         if l == 0: 
             continue
         print "recv ..."
         answer=self.ser.read(l)
         total += l
         full_data += answer
         p = codecs.encode(answer,'hex')
         print 'recv [%s]' % p
         if total >= 4:
             break
            
     return codecs.encode(full_data,'hex')
Example #17
    def __saveCache(self, file):
        cache_file = None
        try:
            temp = RopperService.CACHE_FOLDER
            if not os.path.exists(temp):
                os.makedirs(temp)

            cache_file = temp + os.path.sep + self.__getCacheFileName(file)
            count = RopperService.CACHE_FILE_COUNT
            if not isWindows() and len(file.allGadgets) > 1000:
                if os.path.exists(cache_file):
                    os.remove(cache_file)

                length = len(file.allGadgets)

                step = int(length / count)
                for i in range(count-1):
                    gadgets = file.allGadgets[i*step: (i+1)*step]
                    with open(cache_file+'_%d' % (i+1),'wb') as f:
                        f.write(encode(repr(gadgets).encode('ascii'),'zip'))

                gadgets = file.allGadgets[(count-1)*step:]
                with open(cache_file+'_%d' % (count),'wb') as f:
                    f.write(encode(repr(gadgets).encode('ascii'),'zip'))
                return

            with open(cache_file,'wb') as f:
                f.write(encode(repr(file.allGadgets).encode('ascii'),'zip'))
        except BaseException as e:
            print(e)
            if cache_file:
                for i in range(1, RopperService.CACHE_FILE_COUNT+1):
                    if os.path.exists(cache_file+'_%d' % i):
                        os.remove(cache_file+'_%d' % i)
Example #18
 def test_encode(self):
     self.assertEquals(codecs.encode(u'\xe4\xf6\xfc', 'latin-1'),
                       '\xe4\xf6\xfc')
     self.assertRaises(TypeError, codecs.encode)
     self.assertRaises(LookupError, codecs.encode, "foo", "__spam__")
     self.assertEquals(codecs.encode(u'abc'), 'abc')
     self.assertRaises(UnicodeEncodeError, codecs.encode, u'\xffff', 'ascii')
Example #19
	def usanitize(self, s, strict=False):

		"""
		desc:
			Converts all non-ASCII characters to U+XXXX notation, so that the
			resulting string can be treated as plain ASCII text.

		arguments:
			s:
				desc:	A unicode string to be sanitized
				type:	unicode

		keywords:
			strict:
				desc:	If True, special characters are ignored rather than
						recoded.
				type:	bool

		returns:
			desc:	A regular Python string with all special characters replaced
					by U+XXXX notation or ignored (if strict).
			type:	str
		"""

		if strict:
			_s = codecs.encode(s, u'ascii', u'ignore')
		else:
			_s = codecs.encode(s, u'ascii', u'osreplace')
		_s = str(_s)
		return _s.replace(os.linesep, '\n')
Example #20
 def test(self,command):
     #addr2 = addr_str[4:] + addr_str[2:4] + addr_str[0:2]
     #cmd = "800590B0010000"
     cmd = command[1]
     answer = codecs.decode(cmd,'hex')
     self.flush()
     time.sleep(1)
     answer = self.pack_cmd(answer)
     self.ser.write(answer)
     
     total = 0
     print "[%s]send [%s]" % (command[0],codecs.encode(answer,'hex').upper())
     full_data = ''
     while True:
         #print "waiting ..."
         l = self.ser.inWaiting()
         if l == 0: 
             continue
         #print "recv ..."
         answer=self.ser.read(l)
         total += l
         full_data += answer
         p = codecs.encode(answer,'hex')
         #print 'recv [%s]' % p
         if ord(answer[-1]) == 0x03:
             break
          
     e = codecs.encode(full_data,'hex').upper()
     print "[%s]recv [%s]" % (command[0],e)
     if e[:2] != '02' or e[-2:] != '03':
         return e,False
     if e[6:10] != '0000':
         return e,False
         
     return e,True
Example #21
    def test_ping(self):
        self.CANEngine = CANSploit()
        self.CANEngine.load_config("tests/test_2.py")
        self.CANEngine.edit_module(0, {
            'pipe': 2,
            'body': '000000000000010203040506070102030405060711121314151617a1a2a3a4a5a6a7112233',
            'range': [542999, 543002],
            'mode': 'isotp'
        })
        self.CANEngine.start_loop()
        time.sleep(1)
        self.CANEngine.call_module(0, "s")
        time.sleep(1)
        index = 3

        ret = self.CANEngine.call_module(3, "p")

        _bodyList = self.CANEngine._enabledList[index][1]._bodyList
        self.assertTrue(543000 in _bodyList, "We should be able to find ID 543000")
        self.assertFalse(543002 in _bodyList, "We should not be able to find ID 543002")
        self.assertTrue([1, 1, 1, 1, 1, 1] == list(_bodyList[543001].values()), "We should not be able to find ID")
        self.assertTrue(
            "25112233" == (codecs.encode((list(_bodyList[543001].keys())[5][1]), 'hex_codec')).decode("ISO-8859-1"),
            "Last packet of sec should be like that"
        )
        self.assertTrue(
            "24a1a2a3a4a5a6a7" == (codecs.encode((list(_bodyList[543001].keys())[4][1]), 'hex_codec')).decode("ISO-8859-1") ,
            "Last packet of sec should be like that"
        )
Example #22
def decrypt_strings_utf16(i):
	o = io.StringIO()
	for line in readlines_utf16(i):
		match1 = internal_string_pattern1.search(line)
		match2 = internal_string_pattern2.search(line)
		if match1 and match2:
			id = int(match1.group('SettingID').decode('utf16'), 16)
			string = line[match1.end():match2.start()]
			off = (id << 1) % 256
			k = itertools.cycle(itertools.chain(key[off:], key[:off]))
			deciphered = xor_strings(string, k)
			# print('Deciphered', hex(id), repr(deciphered.decode('utf16')))
			o.write(line[:match1.end()].decode('utf16'))
			if len(deciphered) > 2 and deciphered[:2] == b'\0\0':
				info = 'Corrupt string in profile: %s -> %s' % (
					codecs.encode(string, 'hex').decode('ascii'),
					codecs.encode(deciphered, 'hex').decode('ascii'))
				print('INFO:', info)
				o.write(info)
			else:
				if deciphered[-2:] == b'\0\0':
					deciphered = deciphered[:-2]
				o.write(deciphered.decode('utf16'))
			o.write('"\r\n') # Strip InternalSettingFlag=V0
		else:
			o.write(line.decode('utf16'))

	return o.getvalue()
Example #23
def run(args):
    from jinja2 import Environment, PackageLoader, FileSystemLoader

    mirror2name = {}
    mirror2url = {}
    code2relname = dict([(r, cfg.get('release names', r))
                         for r in cfg.options('release names')
                         if not r == 'data'])
    if cfg.has_section('mirror names'):
        mirror2name = dict([(m, codecs.decode(cfg.get('mirror names', m), 'utf-8'))
                            for m in cfg.options('mirrors')])
    if cfg.has_section('mirrors'):
        mirror2url = dict([(m, cfg.get('mirrors', m))
                           for m in cfg.options('mirrors')])
    if args.template is not None:
        templ_dir = os.path.dirname(args.template)
        templ_basename = os.path.basename(args.template)
        jinja_env = Environment(loader=FileSystemLoader(templ_dir))
        srclist_template = jinja_env.get_template(templ_basename)
    else:
        jinja_env = Environment(loader=PackageLoader('bigmess'))
        srclist_template = jinja_env.get_template('sources_lists.rst')
    print codecs.encode(
        srclist_template.render(code2name=code2relname,
                                mirror2name=mirror2name,
                                mirror2url=mirror2url),
        'utf-8')
Example #24
    def generate_homoglyph_confusables_typos(strHost):
        # swap characters to similar looking characters, based on Unicode's confusables.txt

        results = list()
        global _homoglyphs_confusables
        #Replace each homoglyph subsequence in the strHost with each replacement subsequence associated with the homoglyph subsequence
        for homoglyph_subsequence in _homoglyphs_confusables:
            idx = 0
            while 1:
                idx = strHost.find(homoglyph_subsequence, idx)
                if idx > -1:
                    for replacement_subsequence in _homoglyphs_confusables[homoglyph_subsequence]:
                        #Add with just one change
                        newhostname = strHost[:idx] + replacement_subsequence + strHost[idx + len(homoglyph_subsequence):]
                        try:
                            results.append(str(codecs.encode(newhostname, "idna"), "ascii"))
                        except UnicodeError:
                            #This can be caused by domain parts which are too long for IDNA encoding, so just skip it
                            pass

                        #Add with all occurrences changed
                        newhostname = strHost.replace(homoglyph_subsequence, replacement_subsequence)
                        try:
                            if newhostname not in results:
                                results.append(str(codecs.encode(newhostname, "idna"), "ascii"))
                        except UnicodeError:
                            #This can be caused by domain parts which are too long for IDNA encoding, so just skip it
                            pass

                    idx += len(homoglyph_subsequence)
                else:
                    break

        return results
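
For reference, the IDNA step used above turns non-ASCII labels into punycode with the xn-- prefix (Python 3 sketch):

import codecs

print(str(codecs.encode('bücher.example', 'idna'), 'ascii'))   # xn--bcher-kva.example
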
Example #25
def encrypt(key, message):
    """Encrypts a string 'message' using AES with a 'key'.

    Args:
        message: the string that is to be encrypted using AES
        key: the key that is to be used for encryption; must have a size of 16, 24, or 32 bytes

    Returns:
        A string representing the encrypted version of 'message'
    """

    # TODO: Make the hex <-> byte <-> string conversions consistent throughout all functions

    message = _padding(message)
    message_blocks = _split_message(message, 16)

    IV = os.urandom(16)

    encrypter = AES.new(key, AES.MODE_CBC, IV)
    ciphertext_blocks = [encrypter.encrypt(message_block) for message_block in message_blocks]

    ciphertext_blocks_in_hex = [(codecs.encode(IV, "hex_codec")).decode("utf-8")] + [
        (codecs.encode(ciphertext_block, "hex_codec")).decode("utf-8") for ciphertext_block in ciphertext_blocks
    ]
    ciphertext = "".join(ciphertext_blocks_in_hex)

    return ciphertext
Example #26
def obfuscate_email(email, linktext=None, autoescape=None):
    """
    Given a string representing an email address,
    returns a mailto link with rot13 JavaScript obfuscation.

    Accepts an optional argument to use as the link text;
    otherwise uses the email address itself.
    """
    if autoescape:
        esc = conditional_escape
    else:
        def esc(x):
            return x

    email = re.sub(r'@', r'\\100', re.sub(r'\.', r'\\056', esc(email)))
    email = codecs.encode(email, 'rot13')

    if linktext:
        linktext = esc(linktext).encode('unicode-escape').decode()
        linktext = codecs.encode(linktext, 'rot13')
    else:
        linktext = email

    rotten_link = """<script type="text/javascript">document.write \
        ("<n uers=\\\"znvygb:%s\\\">%s<\\057n>".replace(/[a-zA-Z]/g, \
        function(c){return String.fromCharCode((c<="Z"?90:122)>=\
        (c=c.charCodeAt(0)+13)?c:c-26);}));</script>""" % (email, linktext)
    return mark_safe(rotten_link)
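
Since rot_13 is its own inverse, the client-side JavaScript above only needs to apply the same rotation again; a quick Python-side check of that property:

import codecs

s = 'hello@example.org'
assert codecs.encode(codecs.encode(s, 'rot_13'), 'rot_13') == s
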
Example #27
 def _save_entry(self, **kwargs):
     self._initialize()
     the_id = kwargs.get('id', None)
     need_to_reset = False
     if the_id is None:
         the_entry = MachineLearning(group_id=self.group_id)
         existing = False
     else:
         the_entry = MachineLearning.query.filter_by(group_id=self.group_id, id=the_id).first()
         existing = True
     if the_entry is None:
         raise Exception("There was no entry in the database for id " + str(the_id) + " with group id " + str(self.group_id))
     if 'dependent' in kwargs:
         if existing and the_entry.dependent is not None and the_entry.dependent != kwargs['dependent']:
             need_to_reset = True
         the_entry.dependent = codecs.encode(pickle.dumps(kwargs['dependent']), 'base64').decode()
         the_entry.active = True
     if 'independent' in kwargs:
         if existing and the_entry.independent is not None and the_entry.independent != kwargs['independent']:
             need_to_reset = True
         the_entry.independent = codecs.encode(pickle.dumps(kwargs['independent']), 'base64').decode()
     if 'key' in kwargs:
         the_entry.key = kwargs['key']
     if 'info' in kwargs:
         the_entry.info = codecs.encode(pickle.dumps(kwargs['info']), 'base64').decode()
     the_entry.modtime = datetime.datetime.utcnow()
     if not existing:
         db.session.add(the_entry)
     db.session.commit()
     if need_to_reset:
         self.reset()
Example #28
    def send(self, number, captcha, message, sender):
        '''
        Fills in a form with the given parameters and submits it to the
        Personal page, which performs the actual send.
        Returns True if the message was sent successfully, False otherwise.
        '''
        form = self.browser.get_forms()[0]
        form.set_all_readonly(False)

        sender = encode(sender, 'windows-1252', 'replace')
        message = encode(message, 'windows-1252', 'replace')

        form["Snb"] = number
        form["CODAREA"] = number[:3]
        form["NRO"] = number[3:]
        form["subname"] = number
        form["DE_MESG_TXT"] = sender
        form["sig"] = sender
        form["msgtext"] = message
        form["codigo"] = captcha
        form["MESG_TXT"] = message
        form["FormValidar"] = "validar"

        form.submit()

        error_message = "alerta(Verificá la configuración de tu navegador!)"
        error_message2 = "alerta(El código ingresado es incorrecto. Por favor reingresá el código!)"
        if error_message in self.browser.get_html():
            return False
        elif error_message2 in self.browser.get_html():
            return False
        else:
            return True
Example #29
    def pamWithUsernameAndPassword(self, username, password, service='login'):
        """
        Check username and password using PAM.

        Returns True if credentials are accepted, False otherwise.
        """
        pam_authenticate = self._getPAMAuthenticate()
        if not pam_authenticate:
            # PAM is not supported.
            return False

        # On Python2.7/OSX PAM require str not bytes.
        username = codecs.encode(username, 'utf-8')
        password = codecs.encode(password, 'utf-8')

        with self._executeAsAdministrator():
            # FIXME:3059:
            # PAM can be used without admin right but I have no idea why
            # it fails with errors like:
            # audit_log_acct_message() failed: Operation not permitted.
            checked = pam_authenticate(username, password, service)

        if checked is True:
            return True

        # For PAM account we don't know if this is a failure due to
        # a bad credentials or non existent credentials.
        # Credentials are always rejected.
        return False
Example #30
def hg2json(hg, weights):
  """
  output a JSON representation of a cdec hypergraph
  (see http://aclweb.org/aclwiki/index.php?title=Hypergraph_Format )
  """
  res = ''
  res += "{\n"
  res += '"weights":{'+"\n"
  a = []
  for i in weights:
    a.append( '%s:%f'%(json.dumps(i[0]), i[1]) )
  res += ", ".join(a)+"\n"
  res += "},\n"
  res += '"nodes":'+"\n"
  res += "[\n"
  a = []
  a.append( '{ "id":-1, "cat":"root", "span":[-1,-1] }' )
  for i in hg.nodes:
    a.append( '{ "id":%d, "cat":"%s", "span":[%d, %d] }'%(i.id, i.cat, i.span[0], i.span[1]) )
  res += ",\n".join(a)+"\n"
  res += "],\n"
  res += '"edges":'+"\n"
  res += "[\n"
  a = []
  for i in hg.edges:
    s = "{"
    s += '"head":%d'%(i.head_node.id)
    xs = ' "f":{'
    b = []
    for j in i.feature_values:
      b.append( '"%s":%s'%(j[0], j[1]) )
    xs += ", ".join(b)
    xs += "},"
    c = []
    for j in i.tail_nodes:
      c.append(str(j.id))
    if len(c) > 0:
      s += ', "tails":[ %s ],'%(",".join(c))
    else:
      s += ', "tails":[ -1 ],'
    s += xs
    f =  []
    for x in i.trule.f:
      if type(x) == type(u'x'):
        f.append(codecs.encode(x, 'utf-8'))
      else:
        f.append(str(x))
    e = []
    for x in i.trule.e:
      if type(x) == type(u'x'):
        e.append(codecs.encode(x, 'utf-8'))
      else:
        e.append(str(x))
    s += " \"rule\":\"%s ||| %s ||| %s\""%(str(i.trule.lhs), json.dumps(" ".join(f))[1:-1], json.dumps(" ".join(e))[1:-1])
    s += ' }'
    a.append(s)
  res += ",\n".join(a)+"\n"
  res += "]\n"
  res += "}\n"
  return res
Example #31
def test_app(b, init_chain_request):
    from bigchaindb import App
    from bigchaindb.tendermint_utils import calculate_hash
    from bigchaindb.common.crypto import generate_key_pair
    from bigchaindb.models import Transaction

    app = App(b)
    p = ProtocolHandler(app)

    data = p.process('info',
                     types.Request(info=types.RequestInfo(version='2')))
    res = next(read_messages(BytesIO(data), types.Response))
    assert res
    assert res.info.last_block_app_hash == b''
    assert res.info.last_block_height == 0
    assert not b.get_latest_block()

    p.process('init_chain', types.Request(init_chain=init_chain_request))
    block0 = b.get_latest_block()
    assert block0
    assert block0['height'] == 0
    assert block0['app_hash'] == ''

    pk = codecs.encode(init_chain_request.validators[0].pub_key.data,
                       'base64').decode().strip('\n')
    [validator] = b.get_validators(height=1)
    assert validator['public_key']['value'] == pk
    assert validator['voting_power'] == 10

    alice = generate_key_pair()
    bob = generate_key_pair()
    tx = Transaction.create([alice.public_key],
                            [([bob.public_key], 1)])\
                    .sign([alice.private_key])
    etxn = json.dumps(tx.to_dict()).encode('utf8')

    r = types.Request(check_tx=types.RequestCheckTx(tx=etxn))
    data = p.process('check_tx', r)
    res = next(read_messages(BytesIO(data), types.Response))
    assert res
    assert res.check_tx.code == 0

    r = types.Request()
    r.begin_block.hash = b''
    p.process('begin_block', r)

    r = types.Request(deliver_tx=types.RequestDeliverTx(tx=etxn))
    data = p.process('deliver_tx', r)
    res = next(read_messages(BytesIO(data), types.Response))
    assert res
    assert res.deliver_tx.code == 0

    new_block_txn_hash = calculate_hash([tx.id])

    r = types.Request(end_block=types.RequestEndBlock(height=1))
    data = p.process('end_block', r)
    res = next(read_messages(BytesIO(data), types.Response))
    assert res
    assert 'end_block' == res.WhichOneof('value')

    new_block_hash = calculate_hash([block0['app_hash'], new_block_txn_hash])

    data = p.process('commit', None)
    res = next(read_messages(BytesIO(data), types.Response))
    assert res.commit.data == new_block_hash.encode('utf-8')
    assert b.get_transaction(tx.id).id == tx.id

    block0 = b.get_latest_block()
    assert block0
    assert block0['height'] == 1
    assert block0['app_hash'] == new_block_hash

    # empty block should not update height
    r = types.Request()
    r.begin_block.hash = new_block_hash.encode('utf-8')
    p.process('begin_block', r)

    r = types.Request()
    r.end_block.height = 2
    p.process('end_block', r)

    data = p.process('commit', None)
    res = next(read_messages(BytesIO(data), types.Response))
    assert res.commit.data == new_block_hash.encode('utf-8')

    block0 = b.get_latest_block()
    assert block0
    assert block0['height'] == 2

    # when empty block is generated hash of previous block should be returned
    assert block0['app_hash'] == new_block_hash
Example #32
images = []
csv_dir = 'C:/Anode/Images'
df = pd.DataFrame()
image_data = ''
df["Tank"] = ''
csvs = os.scandir(csv_dir)  # assumed: the loop below expects os.DirEntry objects (.path / .name)
for image_file in csvs:
    if image_file.path.endswith('.JPG'):
        basewidth = 300
        img = Image.open(image_file.path)
        wpercent = (basewidth / float(img.size[0]))
        hsize = int((float(img.size[1]) * float(wpercent)))
        img = img.resize((basewidth, hsize), Image.ANTIALIAS)
        resizedir = r'C:/Anode/resized/'
        if not os.path.exists(resizedir):
            os.makedirs(resizedir)
        img.save(resizedir + image_file.name)
        with open(resizedir + image_file.name, "rb") as f:
            data = f.read()
            img_data = (codecs.encode(obj=data, encoding="base64"))
            data = "data:image/jpeg;base64," + img_data.decode('utf-8')
            #data = data[1:].replace("'", "")
            print(len(data))
            name = image_file.name[:6]
            print(name)
            dataset = {
                'Tank': name,
                'Image': data,
            }
            df = df.append(dataset, ignore_index=True)
df.to_csv(r'C:\Anode\g.csv', index=False)
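
One caveat with the base64 step above: the 'base64' codec wraps long output and appends a trailing newline, which is usually unwanted inside a data: URI. A small Python 3 sketch of the difference, using stand-in bytes:

import base64
import codecs

raw = b'\xff\xd8\xff\xe0' * 40                 # stand-in for JPEG data
wrapped = codecs.encode(raw, 'base64').decode('ascii')
clean = base64.b64encode(raw).decode('ascii')
assert wrapped.replace('\n', '') == clean      # same payload once the line breaks are stripped
data_uri = 'data:image/jpeg;base64,' + clean
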
Example #33
def mock_lookup_spent_txid(self, txid, output_index):
    return txid


###############################################################################

ClientVals = collections.namedtuple(
    'ClientVals', ['deposit_tx', 'payment_tx', 'redeem_script'])
TEST_DEP_AMOUNT = 100000
TEST_DUST_AMOUNT = 1
TEST_PMT_AMOUNT = 5000
TEST_FEE_AMOUNT = 30000
TEST_EXPIRY = 86400
cust_wallet = MockTwo1Wallet()
merch_wallet = MockTwo1Wallet()
BAD_SIGNATURE = codecs.encode(
    cust_wallet._private_key.sign('fake').to_der(), 'hex_codec')
channel_server = PaymentServer(merch_wallet, testnet=True)
channel_server._blockchain = MockBlockchain()


def _create_client_txs():
    """Mock client transactions for opening a channel."""
    # Collect public keys
    expiration_time = int(time.time() + TEST_EXPIRY)
    customer_public_key = cust_wallet.get_payout_public_key()
    merchant_public_key = merch_wallet.get_payout_public_key()

    # Build redeem script
    redeem_script = PaymentChannelRedeemScript(merchant_public_key,
                                               customer_public_key,
                                               expiration_time)
Example #34
with open('lista_nombres', "r") as fp: 
     nombres= pickle.load(fp)

import codecs
from unidecode import unidecode

nombresupdate = []
for name_afi in nombres:
   try:
       name_afi = codecs.decode(name_afi, "utf-8")
       name_afi = unidecode(name_afi)
       nombresupdate.append(name_afi)
       
   except:
       try: 
           name_afi = codecs.encode(name_afi, "utf-8")
           name_afi = unidecode(name_afi)
           nombresupdate.append(name_afi)
               
       except:
           try:
               name_afi = codecs.decode(name_afi, "utf-8")
               name_afi = codecs.encode(name_afi, "utf-8")
               name_afi = unidecode(name_afi)
               nombresupdate.append(name_afi)
           except:
               try:
                   name_afi = codecs.encode(name_afi, "utf-8")
                   name_afi = codecs.decode(name_afi, "utf-8")
                   name_afi = unidecode(name_afi)
                   nombresupdate.append(name_afi)
Example #35
        def registers(self, target_id=0, thread_id=None, registers=[]):
            """
            Get the register values for a given target/thread.

            `target_id` is a target ID (or None for the first target)
            `thread_id` is a thread ID (or None for the selected thread)
            """
            # get the target
            target = self.host.GetTargetAtIndex(target_id)
            t_info = self._target(target_id)

            # get the thread
            if not thread_id:
                thread_id = target.process.selected_thread.id
            try:
                thread = target.process.GetThreadByID(thread_id)
            except:
                raise NoSuchThreadException()

            # if we got 'sp' or 'pc' in registers, change it to whatever the right name is for the current arch
            if t_info['arch'] in self.reg_names:
                if 'pc' in registers:
                    registers.remove('pc')
                    registers.append(self.reg_names[t_info['arch']]['pc'])
                if 'sp' in registers:
                    registers.remove('sp')
                    registers.append(self.reg_names[t_info['arch']]['sp'])
            else:
                raise Exception("Unsupported architecture: {}".format(
                    t_info['arch']))

            # get the registers
            regs = thread.GetFrameAtIndex(0).GetRegisters()

            # extract the actual register values
            objs = []
            for i in xrange(len(regs)):
                objs += regs[i]
            regs = {}
            for reg in objs:
                val = 'n/a'
                if reg.value is not None:
                    try:
                        val = reg.GetValueAsUnsigned()
                    except:
                        reg = None
                elif reg.num_children > 0:
                    try:
                        children = []
                        for i in xrange(reg.GetNumChildren()):
                            children.append(
                                int(
                                    reg.GetChildAtIndex(
                                        i, lldb.eNoDynamicValues, True).value,
                                    16))
                        if t_info['byte_order'] == 'big':
                            children = list(reversed(children))
                        val = int(
                            codecs.encode(
                                struct.pack('{}B'.format(len(children)),
                                            *children), 'hex'), 16)
                    except:
                        pass
                if registers == [] or reg.name in registers:
                    regs[reg.name] = val

            return regs
Example #36
    def main(self):
        self.parse_args()
        bzobj = bugzilla.RHBugzilla4(url=BUGZILLA_URL)
        current = []
        for line in self._repo.log([self._args.current]).splitlines():
            if line.lower().find('bug-url') >= 0:
                line = line.replace('show_bug.cgi?id=', '')
                try:
                    current.append(
                        int(line[line.find(BUGZILLA_SERVER) +
                                 len(BUGZILLA_SERVER) + 1:]))
                except ValueError as e:
                    if self._args.debug:
                        sys.stderr.write('Invalid input in %s: %s\n' % (
                            self._args.current,
                            line,
                        ))
                        sys.stderr.write(str(e))
        current.sort()

        previous = []
        for line in self._repo.log([self._args.previous]).splitlines():
            if line.lower().find('bug-url') >= 0:
                line = line.replace('show_bug.cgi?id=', '')
                try:
                    previous.append(
                        int(line[line.find(BUGZILLA_SERVER) +
                                 len(BUGZILLA_SERVER) + 1:]))
                except ValueError as e:
                    if self._args.debug:
                        sys.stderr.write('Invalid input in %s: %s\n' % (
                            self._args.previous,
                            line,
                        ))
                        sys.stderr.write(str(e))
        previous.sort()

        not_in_old = set(current) - set(previous)
        ids = list(not_in_old)
        ids.sort()

        list_url = "%sbuglist.cgi?action=wrap&bug_id=" % BUGZILLA_HOME
        for bug_id in ids:
            sys.stderr.write('fetching %d\n' % bug_id)
            queryobj = bzobj.build_query(bug_id=str(bug_id))
            ans = bzobj.query(queryobj)
            if ans:
                r = ans[0]
                if (r.product in (
                        'Red Hat Enterprise Virtualization Manager',
                        'oVirt',
                        'Red Hat Storage',
                        'Red Hat Gluster Storage',
                )) or (r.classification == 'oVirt'):
                    if r.status not in (
                            'MODIFIED',
                            'ON_QA',
                            'VERIFIED',
                            'RELEASE_PENDING',
                            'CLOSED',
                    ):
                        sys.stderr.write(
                            ("{bug} - is in status {status} and targeted "
                             "to {milestone}; "
                             "assignee: {assignee}\n").format(
                                 bug=bug_id,
                                 status=r.status,
                                 milestone=r.target_milestone,
                                 assignee=codecs.encode(
                                     r.assigned_to, "utf-8",
                                     "xmlcharrefreplace").decode(
                                         encoding='utf-8', errors='strict')))
                    elif (self._args.target_milestone is not None and
                          r.target_milestone != self._args.target_milestone):
                        sys.stderr.write(
                            "%d - is targeted to %s; assignee: %s\n" %
                            (bug_id, r.target_milestone,
                             codecs.encode(r.assigned_to, "utf-8",
                                           "xmlcharrefreplace").decode(
                                               encoding='utf-8',
                                               errors='strict')))
                    else:
                        list_url += "%s%%2C " % bug_id
                        sys.stdout.write(' - [BZ %s](%s%s)' %
                                         (str(r.id), BUGZILLA_HOME, str(r.id)))
                        sys.stdout.write(' - ')
                        if self._args.show_target:
                            sys.stdout.write(str(r.target_release))
                            sys.stdout.write(' - ')
                        if self._args.show_fixed:
                            sys.stdout.write(str(r.fixed_in))
                            sys.stdout.write(' - ')
                        sys.stdout.write(
                            codecs.encode(r.summary, "utf-8",
                                          "xmlcharrefreplace").decode(
                                              encoding='utf-8',
                                              errors='strict'))
                        sys.stdout.write('\n')
                else:
                    sys.stderr.write("%d - has product %s\n" %
                                     (bug_id, r.product))
            else:
                sys.stderr.write("%d - is a private bug\n" % bug_id)
                list_url += "%s%%2C " % bug_id

        sys.stderr.flush()
        sys.stdout.write('\n\n\n' + list_url + '\n')
        sys.stdout.flush()
Example #37
TYPE_CALLABLE_MAP = {
    FieldDescriptor.TYPE_DOUBLE: float,
    FieldDescriptor.TYPE_FLOAT: float,
    FieldDescriptor.TYPE_INT32: int,
    FieldDescriptor.TYPE_INT64: int,
    FieldDescriptor.TYPE_UINT32: int,
    FieldDescriptor.TYPE_UINT64: int,
    FieldDescriptor.TYPE_SINT32: int,
    FieldDescriptor.TYPE_SINT64: int,
    FieldDescriptor.TYPE_FIXED32: int,
    FieldDescriptor.TYPE_FIXED64: int,
    FieldDescriptor.TYPE_SFIXED32: int,
    FieldDescriptor.TYPE_SFIXED64: int,
    FieldDescriptor.TYPE_BOOL: bool,
    FieldDescriptor.TYPE_STRING: str,
    FieldDescriptor.TYPE_BYTES: lambda b: codecs.encode(b, encoding="base64"),
    FieldDescriptor.TYPE_ENUM: int,
}


def repeated(type_callable):
    return lambda value_list: [type_callable(value) for value in value_list]


def enum_label_name(field, value):
    return field.enum_type.values_by_number[int(value)].name


def protobuf_to_dict(pb,
                     type_callable_map=TYPE_CALLABLE_MAP,
                     use_enum_labels=False):
Example #38
def pretty_token(token):
    """Return a pretty string presentation for a token."""
    return codecs.encode(token, "hex").decode()
Example #39
def get_int(b):
    return int(codecs.encode(b, 'hex'), 16)
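
The hex round-trip above is an older spelling of big-endian bytes-to-int conversion; on Python 3 it matches int.from_bytes:

import codecs

b = b'\x01\x02\xff'
assert int(codecs.encode(b, 'hex'), 16) == int.from_bytes(b, 'big') == 0x0102ff
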
Example #40
def patchkeys(f, key):
    # Setup struct pack string
    key_pack = '=4sB4sB6xQ'
    # smc_old_memptr = 0
    smc_new_memptr = 0

    # Do Until OSK1 read
    i = 0
    while True:

        # Read key into struct str and data byte str
        offset = key + (i * 72)
        f.seek(offset)
        smc_key = struct.unpack(key_pack, f.read(24))
        smc_data = f.read(smc_key[1])

        # Reset pointer to beginning of key entry
        f.seek(offset)

        if smc_key[0] == b'SKL+':
            # Use the +LKS data routine for OSK0/1
            smc_new_memptr = smc_key[4]
            print('+LKS Key: ')
            printkey(i, offset, smc_key, smc_data)

        elif smc_key[0] == b'0KSO':
            # Write new data routine pointer from +LKS
            print('OSK0 Key Before:')
            printkey(i, offset, smc_key, smc_data)
            # smc_old_memptr = smc_key[4]
            f.seek(offset)
            f.write(struct.pack(key_pack, smc_key[0], smc_key[1], smc_key[2], smc_key[3], smc_new_memptr))
            f.flush()

            # Write new data for key
            f.seek(offset + 24)
            smc_new_data = codecs.encode('bheuneqjbexolgurfrjbeqfthneqrqcy', 'rot_13')
            f.write(smc_new_data.encode('UTF-8'))
            f.flush()

            # Re-read and print key
            f.seek(offset)
            smc_key = struct.unpack(key_pack, f.read(24))
            smc_data = f.read(smc_key[1])
            print('OSK0 Key After:')
            printkey(i, offset, smc_key, smc_data)

        elif smc_key[0] == b'1KSO':
            # Write new data routine pointer from +LKS
            print('OSK1 Key Before:')
            printkey(i, offset, smc_key, smc_data)
            smc_old_memptr = smc_key[4]
            f.seek(offset)
            f.write(struct.pack(key_pack, smc_key[0], smc_key[1], smc_key[2], smc_key[3], smc_new_memptr))
            f.flush()

            # Write new data for key
            f.seek(offset + 24)
            smc_new_data = codecs.encode('rnfrqbagfgrny(p)NccyrPbzchgreVap', 'rot_13')
            f.write(smc_new_data.encode('UTF-8'))
            f.flush()

            # Re-read and print key
            f.seek(offset)
            smc_key = struct.unpack(key_pack, f.read(24))
            smc_data = f.read(smc_key[1])
            print('OSK1 Key After:')
            printkey(i, offset, smc_key, smc_data)

            # Finished so get out of loop
            break

        else:
            pass

        i += 1
    return smc_old_memptr, smc_new_memptr
Example #41
    async def add_torrent_to_channel(self, request):
        channel_pk, channel_id = self.get_channel_from_request(request)
        with db_session:
            channel = self.mds.CollectionNode.get(public_key=channel_pk,
                                                  id_=channel_id)
        if not channel:
            return RESTResponse({"error": "Unknown channel"},
                                status=HTTP_NOT_FOUND)

        parameters = await request.json()

        extra_info = {}
        if parameters.get('description', None):
            extra_info = {'description': parameters['description']}

        # First, check whether we did upload a magnet link or URL
        if parameters.get('uri', None):
            uri = parameters['uri']
            if uri.startswith("http:") or uri.startswith("https:"):
                data = await _fetch_uri(uri)
                tdef = TorrentDef.load_from_memory(data)
            elif uri.startswith("magnet:"):
                _, xt, _ = parse_magnetlink(uri)
                if (xt and is_infohash(codecs.encode(xt, 'hex'))
                        and (self.mds.torrent_exists_in_personal_channel(xt)
                             or channel.copy_torrent_from_infohash(xt))):
                    return RESTResponse({"added": 1})

                meta_info = await self.download_manager.get_metainfo(
                    xt, timeout=30, url=uri)
                if not meta_info:
                    raise RuntimeError("Metainfo timeout")
                tdef = TorrentDef.load_from_dict(meta_info)
            else:
                return RESTResponse({"error": "unknown uri type"},
                                    status=HTTP_BAD_REQUEST)

            added = 0
            if tdef:
                channel.add_torrent_to_channel(tdef, extra_info)
                added = 1
            return RESTResponse({"added": added})

        torrents_dir = None
        if parameters.get('torrents_dir', None):
            torrents_dir = parameters['torrents_dir']
            if not Path(torrents_dir).is_absolute():
                return RESTResponse(
                    {"error": "the torrents_dir should point to a directory"},
                    status=HTTP_BAD_REQUEST)

        recursive = False
        if parameters.get('recursive'):
            recursive = parameters['recursive']
            if not torrents_dir:
                return RESTResponse(
                    {
                        "error":
                        "the torrents_dir parameter should be provided when the recursive parameter is set"
                    },
                    status=HTTP_BAD_REQUEST,
                )

        if torrents_dir:
            torrents_list, errors_list = channel.add_torrents_from_dir(
                torrents_dir, recursive)
            return RESTResponse({
                "added": len(torrents_list),
                "errors": errors_list
            })

        if not parameters.get('torrent', None):
            return RESTResponse({"error": "torrent parameter missing"},
                                status=HTTP_BAD_REQUEST)

        # Try to parse the torrent data
        # Any errors will be handled by the error_middleware
        torrent = base64.b64decode(parameters['torrent'])
        torrent_def = TorrentDef.load_from_memory(torrent)
        channel.add_torrent_to_channel(torrent_def, extra_info)
        return RESTResponse({"added": 1})
Example #42
def escape_unicode_characters(s):
    return codecs.encode(s, "unicode_escape").decode("ascii")
Example #43
 def encrypt(s):
     print ("Encrypting {!r}".format(s))
     return codecs.encode(s, 'rot-13')
Example #44
def make_unix_newline(buf):
    decoded = codecs.decode(buf, 'utf-8')
    decoded = decoded.replace('\r', '')
    return codecs.encode(decoded, 'utf-8')
Example #45
def quote_string(obj):
    return "'{}'".format(
        codecs.encode(obj, 'unicode_escape').decode('ascii').replace("'", "\\'"))
Example #46
0
def get_whois_raw(domain,
                  server="",
                  previous=None,
                  rfc3490=True,
                  never_cut=False,
                  with_server_list=False,
                  server_list=None):
    previous = previous or []
    server_list = server_list or []
    # Sometimes IANA simply won't give us the right root WHOIS server
    exceptions = {
        ".ac.uk": "whois.ja.net",
        ".ps": "whois.pnina.ps",
        ".buzz": "whois.nic.buzz",
        ".moe": "whois.nic.moe",
        # The following is a bit hacky, but IANA won't return the right answer for example.com because it's a direct registration.
        "example.com": "whois.verisign-grs.com"
    }

    if rfc3490:
        if sys.version_info < (3, 0):
            domain = encode(
                domain if type(domain) is unicode else decode(domain, "utf8"),
                "idna")
        else:
            domain = encode(domain, "idna").decode("ascii")

    if len(previous) == 0 and server == "":
        # Root query
        is_exception = False
        for exception, exc_serv in exceptions.items():
            if domain.endswith(exception):
                is_exception = True
                target_server = exc_serv
                break
        if not is_exception:
            target_server = get_root_server(domain)
    else:
        target_server = server
    if target_server == "whois.jprs.jp":
        request_domain = "%s/e" % domain  # Suppress Japanese output
    elif domain.endswith(".de") and (target_server == "whois.denic.de" or
                                     target_server == "de.whois-servers.net"):
        request_domain = "-T dn,ace %s" % domain  # regional specific stuff
    elif target_server == "whois.verisign-grs.com":
        request_domain = "=%s" % domain  # Avoid partial matches
    else:
        request_domain = domain
    response = whois_request(request_domain, target_server)
    if never_cut:
        # If the caller has requested to 'never cut' responses, he will get the original response from the server (this is
        # useful for callers that are only interested in the raw data). Otherwise, if the target is verisign-grs, we will
        # select the data relevant to the requested domain, and discard the rest, so that in a multiple-option response the
        # parsing code will only touch the information relevant to the requested domain. The side-effect of this is that
        # when `never_cut` is set to False, any verisign-grs responses in the raw data will be missing header, footer, and
        # alternative domain options (this is handled a few lines below, after the verisign-grs processing).
        new_list = [response] + previous
    if target_server == "whois.verisign-grs.com":
        # VeriSign is a little... special. As it may return multiple full records and there's no way to do an exact query,
        # we need to actually find the correct record in the list.
        for record in response.split("\n\n"):
            if re.search("Domain Name: %s\n" % domain.upper(), record):
                response = record
                break
    if not never_cut:
        new_list = [response] + previous
    server_list.append(target_server)
    for line in [x.strip() for x in response.splitlines()]:
        match = re.match(
            r"(refer|whois server|referral url|registrar whois):\s*([^\s]+\.[^\s]+)",
            line, re.IGNORECASE)
        if match is not None:
            referal_server = match.group(2)
            if referal_server != server and "://" not in referal_server:  # We want to ignore anything non-WHOIS (eg. HTTP) for now.
                # Referal to another WHOIS server...
                return get_whois_raw(domain,
                                     referal_server,
                                     new_list,
                                     server_list=server_list,
                                     with_server_list=with_server_list)
    if with_server_list:
        return (new_list, server_list)
    else:
        return new_list
Example #47
0
def _bytes_to_int(b):
    if not b or not isinstance(b, bytes):
        raise ValueError("b must be non-empty byte string")

    return int(codecs.encode(b, "hex"), 16)
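A small self-check I added (not from the source): the hex round trip agrees with int.from_bytes in big-endian order.

# Hypothetical sanity checks for _bytes_to_int defined above
assert _bytes_to_int(b"\x01\x00") == 256
assert _bytes_to_int(b"\xff\x00\x01") == int.from_bytes(b"\xff\x00\x01", "big")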
Example #48
0
def str2hex(data):
    """
    Convert a byte string to its hex encoded form
    """
    result = codecs.encode(data, 'hex')
    return result
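Usage sketch (hypothetical): in Python 3 the input must be bytes, and the result is a bytes object of hex digits.

assert str2hex(b"abc") == b"616263"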
Example #49
0
def get(name, imdb, season, episode):
    try:
        langs = []
        try:
            try: langs = langDict[control.setting('subtitles.lang.1')].split(',')
            except: langs.append(langDict[control.setting('subtitles.lang.1')])
        except: pass
        try:
            try: langs = langs + langDict[control.setting('subtitles.lang.2')].split(',')
            except: langs.append(langDict[control.setting('subtitles.lang.2')])
        except: pass

        try: subLang = xbmc.Player().getSubtitles()
        except: subLang = ''
        if subLang == langs[0]: raise Exception()

        server = xmlrpclib.Server('http://api.opensubtitles.org/xml-rpc', verbose=0)
        token = server.LogIn('', '', 'en', 'XBMC_Subtitles_v1')['token']

        sublanguageid = ','.join(langs) ; imdbid = re.sub('[^0-9]', '', imdb)

        if not (season == '' or episode == ''):
            result = server.SearchSubtitles(token, [{'sublanguageid': sublanguageid, 'imdbid': imdbid, 'season': season, 'episode': episode}])['data']
            fmt = ['hdtv']
        else:
            result = server.SearchSubtitles(token, [{'sublanguageid': sublanguageid, 'imdbid': imdbid}])['data']
            try: vidPath = xbmc.Player().getPlayingFile()
            except: vidPath = ''
            fmt = re.split(r'\.|\(|\)|\[|\]|\s|\-', vidPath)
            fmt = [i.lower() for i in fmt]
            fmt = [i for i in fmt if i in quality]

        filter = []
        result = [i for i in result if i['SubSumCD'] == '1']

        for lang in langs:
            filter += [i for i in result if i['SubLanguageID'] == lang and any(x in i['MovieReleaseName'].lower() for x in fmt)]
            filter += [i for i in result if i['SubLanguageID'] == lang and any(x in i['MovieReleaseName'].lower() for x in quality)]
            filter += [i for i in result if i['SubLanguageID'] == lang]

        try: lang = xbmc.convertLanguage(filter[0]['SubLanguageID'], xbmc.ISO_639_1)
        except: lang = filter[0]['SubLanguageID']

        content = [filter[0]['IDSubtitleFile'],]
        content = server.DownloadSubtitles(token, content)
        content = base64.b64decode(content['data'][0]['data'])
        content = str(zlib.decompressobj(16+zlib.MAX_WBITS).decompress(content))

        subtitle = xbmc.translatePath('special://temp/')
        subtitle = os.path.join(subtitle, 'TemporarySubs.%s.srt' % lang)

        codepage = codePageDict.get(lang, '')
        if codepage and control.setting('subtitles.utf') == 'true':
            try:
                content_encoded = codecs.decode(content, codepage)
                content = codecs.encode(content_encoded, 'utf-8')
            except:
                pass

        file = control.openFile(subtitle, 'w')
        file.write(str(content))
        file.close()

        xbmc.sleep(1000)
        xbmc.Player().setSubtitles(subtitle)
    except:
        pass
Example #50
0
def main(args):
    from ptvsd import adapter
    from ptvsd.common import compat, log
    from ptvsd.adapter import ide, servers, sessions

    if args.log_stderr:
        log.stderr.levels |= set(log.LEVELS)
    if args.log_dir is not None:
        log.log_dir = args.log_dir

    log.to_file(prefix="ptvsd.adapter")
    log.describe_environment("ptvsd.adapter startup environment:")

    if args.for_server and args.port is None:
        log.error("--for-server requires --port")
        sys.exit(64)

    servers.access_token = args.server_access_token
    if not args.for_server:
        adapter.access_token = compat.force_str(
            codecs.encode(os.urandom(32), "hex"))

    server_host, server_port = servers.listen()
    ide_host, ide_port = ide.listen(port=args.port)
    endpoints_info = {
        "ide": {
            "host": ide_host,
            "port": ide_port
        },
        "server": {
            "host": server_host,
            "port": server_port
        },
    }

    if args.for_server:
        log.info("Writing endpoints info to stdout:\n{0!r}", endpoints_info)
        print(json.dumps(endpoints_info))
        sys.stdout.flush()

    if args.port is None:
        ide.IDE("stdio")

    listener_file = os.getenv("PTVSD_ADAPTER_ENDPOINTS")
    if listener_file is not None:
        log.info("Writing endpoints info to {0!r}:\n{1!r}", listener_file,
                 endpoints_info)

        def delete_listener_file():
            log.info("Listener ports closed; deleting {0!r}", listener_file)
            try:
                os.remove(listener_file)
            except Exception:
                log.exception("Failed to delete {0!r}",
                              listener_file,
                              level="warning")

        with open(listener_file, "w") as f:
            atexit.register(delete_listener_file)
            print(json.dumps(endpoints_info), file=f)

    # These must be registered after the one above, to ensure that the listener sockets
    # are closed before the endpoint info file is deleted - this way, another process
    # can wait for the file to go away as a signal that the ports are no longer in use.
    atexit.register(servers.stop_listening)
    atexit.register(ide.stop_listening)

    servers.wait_until_disconnected()
    log.info(
        "All debug servers disconnected; waiting for remaining sessions...")

    sessions.wait_until_ended()
    log.info("All debug sessions have ended; exiting.")
Example #51
0
    kernel_func = r'''
BUFFER_SIZE = 0x10
ERROR_POINT = 12 # taken from types.h

def get_initramfs(user_initram):
    buffer(user_initram, BUFFER_SIZE)
    pointer(initram)
    pointer(sizeptr)

    initram = KERNEL_kallsyms_lookup_name("__initramfs_start")
    sizeptr = KERNEL_kallsyms_lookup_name("__initramfs_size")
    if initram == 0 or sizeptr == 0:
        raise ERROR_POINT

    size = DEREF(sizeptr)
    if size < 0 or size > BUFFER_SIZE:
        size = BUFFER_SIZE
    KERNEL_memcpy(user_initram, initram, size)
    return size
'''

    plug = kplugs.Plug(ip='127.0.0.1')
    buf = bytearray(0x10)
    get_initramfs = plug.compile(kernel_func)[0]
    size = get_initramfs(buf)
    print("The initramfs starts with: '%s'" % (codecs.encode(bytes(buf[:size]), "hex").decode(), ))

finally:
    kplugs.release_kplugs()

Example #52
0
import time
import string
import codecs

# # Index	    0	1	2	3	4	5	6	7	8	9	10	11	12	13	14	15	16	17	18	19	20	21	22	23	24	25
# # English	a	b	c	d	e	f	g	h	i	j	k	l	m	n	o	p	q	r	s	t	u	v	w	x	y	z
# # ROT+13	n	o	p	q	r	s	t	u	v	w	x	y	z	a	b	c	d	e	f	g	h	i	j	k	l	m
initial = input("Would you like to encrypt or decrypt?\n:")  # ROT13 is its own inverse, so both choices apply the same codec
user_in = input("Message to Encrypt:\n")
encryption_1 = codecs.encode(user_in, 'rot13')
print("ENCRYPTING")
time.sleep(3)
print(encryption_1)
Example #53
0
def rot13(some_str):
    return codecs.encode(some_str, 'rot_13')
Example #54
0
def rot13(in_str):
    out_str = ''
    for i in in_str:
        encp = ord(i)
        """if ascii corresponds to alphabets below n add 13 to the ascii and find corresponding letter"""
        if (97 <= encp <= 109) or (65 <= encp <= 77):
            encp = encp + 13
            #print(chr(encp),end='')
            out_str += chr(encp)
            """if ascii corresponds to alphabets above m subtract 13 from the ascii and find corresponding letter"""
        elif (110 <= encp <= 122) or (78 <= encp <= 90):
            encp = encp - 13
            #print(chr(encp),end='')
            out_str += chr(encp)
        else:
            #print(i,end='')
            out_str += chr(encp)
    return out_str


#Test block
if __name__ == '__main__':
    print("Main module is being run directly ")
    Myoutstrng = rot13("Hello")
    #using the rot_13 algorithm to test my code
    assert (Myoutstrng) == codecs.encode('Hello', 'rot_13')
    Myoutstrng = rot13("Zntargvp sebz bhgfvqr arne pbeare")
    assert (Myoutstrng) == codecs.encode('Zntargvp sebz bhgfvqr arne pbeare',
                                         'rot_13')
    Myoutstrng = rot13("Zntargvpjij**")

    # Note: 'deewhuhfu' is not the ROT13 of 'Zntargvpjij**', so this assert
    # fails and the message below reports the expected value.
    assert (
        Myoutstrng
    ) == 'deewhuhfu', "Assertion Error, expected is: %s" % codecs.encode(
        'Zntargvpjij**', 'rot_13')
else:
    print("rot_13.py is being imported into another module ")
Example #55
0
 def is_ascii(domainname):
     return str(codecs.encode(domainname, "idna"), "ascii") == domainname
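Usage sketch of mine, assuming the function above is available at module level: pure-ASCII hostnames survive the IDNA encode unchanged, while non-ASCII labels do not.

assert is_ascii("example.com")
assert not is_ascii("bücher.de")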
Example #56
0
def u2a(data):
    return str(
        codecs.decode(codecs.encode(data, 'ascii', 'ignore'), 'ascii',
                      'ignore'))
Example #57
0
def main(args):

    print("\nScaffolding...")

    # Variables #
    appname = args.appname
    fullpath = os.path.join(cwd, appname)
    skeleton_dir = args.skeleton

    # Copy files and folders
    print("Copying files and folders...")
    shutil.copytree(os.path.join(script_dir, skeleton_dir), fullpath)

    # Create config.py
    print("Creating the config...")
    secret_key = codecs.encode(os.urandom(32), 'hex').decode('utf-8')
    template = template_env.get_template('config.jinja2')
    template_var = {
        'secret_key': secret_key,
    }
    with open(os.path.join(fullpath, 'project', 'config.py'), 'w') as fd:
        fd.write(template.render(template_var))

    # Add bower dependencies
    if args.bower:
        print("Adding bower dependencies...")
        bower = args.bower.split(',')
        bower_exe = which('bower')
        if bower_exe:
            os.chdir(os.path.join(fullpath, 'project', 'static'))
            for dependency in bower:
                output, error = subprocess.Popen(
                    [bower_exe, 'install', dependency],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE).communicate()
                if error:
                    print("An error occurred with Bower")
                    print(error)
        else:
            print("Could not find bower. Ignoring.")

    # Add a virtualenv
    virtualenv = args.virtualenv
    if virtualenv:
        print("Adding a virtualenv...")
        virtualenv_exe = which('virtualenv')
        print(virtualenv_exe)
        if virtualenv_exe:
            output, error = subprocess.Popen(
                [virtualenv_exe, os.path.join(fullpath, 'env')],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()
            if error:
                with open('virtualenv_error.log', 'w') as fd:
                    fd.write(error.decode(encoding="utf-8", errors='strict'))
                    print("An error occurred with virtualenv")
                    sys.exit(2)
            venv_bin = os.path.join(fullpath, 'env/bin')
            output, error = subprocess.Popen(
                [
                    os.path.join(venv_bin, 'pip'), 'install', '-r',
                    os.path.join(fullpath, 'requirements.txt')
                ],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE).communicate()
            if error:
                with open('pip_error.log', 'w') as fd:
                    fd.write(error.decode(encoding="utf-8", errors='strict'))
                    sys.exit(2)
        else:
            print("Could not find virtualenv executable. Ignoring")

    # Git init
    if args.git:
        print("Initializing Git...")
        output, error = subprocess.Popen(['git', 'init', fullpath],
                                         stdout=subprocess.PIPE,
                                         stderr=subprocess.PIPE).communicate()
        if error:
            with open('git_error.log', 'w') as fd:
                fd.write(error.decode(encoding="utf-8", errors='strict'))
                print("Error with git init")
                sys.exit(2)
        shutil.copyfile(os.path.join(script_dir, 'templates', '.gitignore'),
                        os.path.join(fullpath, '.gitignore'))
Example #58
0
    def loadconfusables():
        global _homoglyphs_confusables
        _homoglyphs_confusables = dict()
        rejected_sequences = set()

        #'utf_8_sig' swallows the BOM at start of file
        with open("datasources/confusables.txt", "r",
                  encoding="'utf_8_sig") as f:
            for line in f:
                #If line contains more than whitespace and isn't a comment
                if line.strip() and not line.startswith("#"):
                    split = line.split(';', maxsplit=2)
                    #parse the left hand side of the pairing
                    unihex = split[0].split(' ')[0]
                    part0 = (chr(int(unihex, 16)))

                    if part0 in rejected_sequences:
                        continue

                    #parse the right hand side of the pairing
                    part1 = ''
                    for unihex in split[1].strip().split(' '):
                        part1 += (chr(int(unihex, 16)))

                    if part1 in rejected_sequences:
                        continue

                    #Skip pairs already in the _homoglyphs dict
                    if part0 in _homoglyphs_confusables and part1 in _homoglyphs_confusables[
                            part0]:
                        continue

                    try:
                        #filter out glyphs which do not survive round trip conversion, e.g. ß -> ss -> ss
                        if 'a' + part0 + 'b' != codecs.decode(
                                codecs.encode('a' + part0 + 'b', "idna"),
                                "idna"):
                            rejected_sequences.add(part0)
                            continue
                    except UnicodeError:
                        #Some characters/combinations will fail the nameprep stage
                        rejected_sequences.add(part0)
                        continue

                    try:
                        #filter out glyphs which do not survive round trip conversion, e.g. ß -> ss -> ss
                        if 'a' + part1 + 'b' != codecs.decode(
                                codecs.encode('a' + part1 + 'b', "idna"),
                                "idna"):
                            rejected_sequences.add(part1)
                            continue
                    except UnicodeError:
                        #Some characters/combinations will fail the nameprep stage
                        rejected_sequences.add(part1)
                        continue

                    #Include left to right pair mapping in the dict
                    if part0 not in _homoglyphs_confusables:
                        _homoglyphs_confusables[part0] = set()
                    _homoglyphs_confusables[part0].add(part1)

                    #Include right to left pair mapping in the dict
                    if part1 not in _homoglyphs_confusables:
                        _homoglyphs_confusables[part1] = set()
                    _homoglyphs_confusables[part1].add(part0)
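The idna round-trip filter used above can be seen in isolation (a minimal illustration, not from the source): nameprep maps 'ß' to 'ss', so the character fails the check and would be rejected.

import codecs

s = 'a' + 'ß' + 'b'
roundtrip = codecs.decode(codecs.encode(s, 'idna'), 'idna')
print(roundtrip)       # 'assb'
print(roundtrip == s)  # False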
Example #59
0
def obj_to_pickle_string(x):
    return codecs.encode(pickle.dumps(x), "base64").decode()
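For completeness, the inverse direction could look like this (a sketch of mine mirroring the encode step; pickle_string_to_obj is a name I made up):

import codecs
import pickle

def pickle_string_to_obj(s):
    # Reverse the base64 text encoding, then unpickle
    return pickle.loads(codecs.decode(s.encode(), "base64"))

assert pickle_string_to_obj(obj_to_pickle_string({"a": 1})) == {"a": 1}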
Example #60
0
def unescape_string(s):
    return decode(encode(s, 'latin-1', 'backslashreplace'), 'unicode-escape')