Пример #1
0
def proSocket(host, port, cmd, type=None):
	'''
	Send a command to a remote host and return the decoded reply.

	@host: host IP
	@port: host port
	@cmd:  command string
	@type: optional suffix appended to the command before sending

	Returns the decoded response, or False when the exchange fails.
	Fixes vs. original: removed a leftover Python2-only debug print,
	and the socket is now always closed (it leaked on the success path
	and `sock` could be unbound in the outer handler).
	'''
	data = ''
	sock = None
	try:
		sock = socket.socket()
		sock.settimeout(SOCKET['PROTIMEOUT'])
		sock.connect((host, port))
		if type:
			sock.send(encode(cmd + type))
		else:
			sock.send(encode(cmd))

		# Read until the peer stops sending.
		while True:
			chunk = sock.recv(SOCKET['BUFSIZE'])
			data = data + chunk
			time.sleep(SOCKET['DELAY'])
			if not chunk:
				break
		try:
			data = decode(data)
		except Exception as e:
			# Decoding failed (likely a truncated stream): retry the
			# whole exchange once with a longer inter-read delay.
			log_error('decode error:' + str(e) + ' Try increasing the delay.')
			sock.send(encode(cmd))
			data = ''
			while True:
				chunk = sock.recv(SOCKET['BUFSIZE'])
				data = data + chunk
				time.sleep(SOCKET['DELAY'] + 1)
				if not chunk:
					break
			try:
				data = decode(data)
			except Exception as e:
				log_error('recv decode error:' + str(e))
				return False
	except Exception as e:
		log_error('ip: ' + host + ' , port: ' + str(port) + ' , proSocket other error: ' + str(e))
		return False
	finally:
		# Always release the socket; guard against socket() itself failing.
		if sock is not None:
			sock.close()

	return data
Пример #2
0
def RproSocket(host, port, cmd, type=None):
    '''
    Repeatedly send *cmd* until the peer acknowledges with
    'result_send_ok', giving up after a bounded number of attempts.

    @host: host IP
    @port: host port
    @cmd:  command (converted to str)

    Returns 1 on completion, False on error.
    '''
    cmd = str(cmd)
    # Start communicating
    try:
        for attempt in range(11):
            # Random backoff in [0, 1) s to avoid hammering the peer.
            time.sleep(round(float(random.randrange(0, 10000, 1)) / 10000, 4))
            data = socketSend(host, port, cmd, type='result')

            # socketSend returns False on failure.  Fix: the original's
            # `continue` skipped the attempt counter, so an unreachable
            # peer caused an infinite retry loop; failures now consume
            # an attempt too.
            if data is False:
                continue

            if decode(data) == 'result_send_ok':
                break

    except Exception as e:
        save_log('ERROR', 'RproSocket error:' + str(e))
        return False

    return 1
Пример #3
0
    def read(self):
        """Scan backwards through the file for the footer record.

        Grows a read window one byte at a time until the bytes decode
        successfully and contain b"root_offset".  Returns the decoded
        footer, or None when seeking fails.
        """

        if not self.parsed:
            self.parse_header()

        f = open(self.filename, "br")

        i = 0
        read_till = 0

        while True:
            try:
                # NOTE(review): SEEK_CUR (not SEEK_END) is used, so the
                # window is relative to the current position -- confirm
                # this is intentional.
                f.seek(-i,SEEK_CUR)
            except OSError:
                # Dutch: "Footer not found, file is incorrect."
                print("Footer niet gevonden, bestand is incorrect. Error!")
                f.close()
                return None

            data = f.read(i-read_till)

            try:
                footer = decode(data)
                #print(footer)

                if b"root_offset" in footer:
                    break
                else:
                    # Decoded something that is not the footer; skip
                    # those bytes on the next pass.
                    read_till = i

            except:
                # Not decodable yet -- widen the window by one byte.
                i += 1

        print("Footer: " + str(footer))

        # NOTE(review): f is never closed on this success path.
        return footer
Пример #4
0
    def __getitem__(self, key):
        """
        Returns the value that corresponds with the key, if the key is not
        present, None is returned.
        """
        if key not in self.bucket:
            return None

        offset = self.bucket[key]
        # The stored document's length is unknown: read an ever-larger
        # window from its offset until it decodes and passes the
        # integrity check.  `with` guarantees the handle is closed even
        # if this loop is interrupted (the original's f.close() was not
        # exception-safe).
        with open(self.tree.filename, "br") as f:
            i = 0
            while True:
                f.seek(offset)
                data = f.read(i)
                try:
                    doc_data = decode(check_integrity(data))
                    break
                except Exception:
                    i += 1

        # Normalise raw bytes back to str for the caller.
        if isinstance(doc_data, (bytes, bytearray)):
            return doc_data.decode("utf-8")
        if isinstance(doc_data, list):
            return [item.decode("utf-8") if isinstance(item, bytes) else item
                    for item in doc_data]
        return doc_data
Пример #5
0
def get_last_footer(filename):
    """Scan backwards from the end of *filename* for the last footer.

    Returns the decoded footer (containing b"root_offset"), or None when
    the file did not exist (an empty one is created) or no footer can be
    located.
    """
    try:
        f = open(filename, "br")
    except FileNotFoundError:
        print("File was not found, using a new file with name: " + filename)
        # Create an empty file so later writers can use it.
        f = open(filename, "w")
        f.close()
        return None

    i = 0
    read_till = 0

    while True:
        try:
            # Widen the window one byte at a time from the file end.
            f.seek(-i, 2)
        except OSError:
            print("Could not retrieve footer, file is incorrect")
            f.close()
            return None

        data = f.read(i - read_till)
        try:
            footer = decode(check_integrity(data))
            if b"root_offset" in footer:
                break
            else:
                # Decoded a non-footer record; skip it on the next pass.
                read_till = i

        except Exception:
            i += 1

    # Fix: the original leaked the file handle on the success path.
    f.close()
    print("Footer: " + str(footer))
    return footer
Пример #6
0
def parse(url) :
    """Parse the candidate list page at *url* into a reps mapping."""
    reps = {
        'wp': {},
        'names': {},
        'links': {},
    }
    page = cache.cachewp (url)
    parser = lxml.etree.HTMLParser(encoding="utf-8")
    doc = lxml.etree.HTML(page, parser=parser)
    for item in doc.xpath("//ol/li") :
        for anchor in item.xpath("a"):
            href = anchor.get("href")
            label = anchor.text

            record = {
                'links' :   {
                    'homepage' : {}
                },
                'link' :   href,
                'name' : label
            }
            # Key is the URL-decoded final path segment of the link.
            key = re.search("/([^\/]+)$",href).group(1)
            key = urllib.unquote(key)
            key = encode.decode(key)

            # Collect all the links and point them at the record object.
            reps['wp'][key] = parse_ballotwiki_page(href, reps, record)
            reps['names'][label] = record

    return reps
Пример #7
0
def start_training():
    """Run a DH key exchange, train the auto-encoder, and demo a round trip.

    Seeds numpy's RNG with the shared key so both parties derive the
    same encoder, trains, then encodes/decodes a sample sentence and
    shows the encoded payload as an image.
    """
    alice_key, bob_key = dh_exchange()
    print('Key has been chosen !', alice_key, bob_key)
    # Seeding with the shared key makes the trained encoder reproducible
    # on both sides of the exchange.
    np.random.seed(alice_key)

    from auto_encoder import train
    from encode import encode, decode

    print('Starting training ...')
    train(Callback2(), set_auto_encoder)

    alice_sentence = "bonjour bob 123456"

    encrypted_sentence = encode(alice_sentence)
    decrypted_sentence = decode(encrypted_sentence)

    # Hoisted: the original rebuilt this 18x10 array three times.
    grid = np.array(encrypted_sentence).reshape((18, 10))

    print('Original sentence was', alice_sentence)
    print('Encrypted sentence was', grid)
    img = Image.fromarray(grid * 255 / np.max(np.max(grid)))
    img.show()
    print('Decrypted sentence is', decrypted_sentence)
Пример #8
0
def RproSocket(host, port, cmd, type=None):
	'''
	Keep sending *cmd* until the peer replies 'result_send_ok' or the
	retry budget is exhausted.

	@host: host IP
	@port: host port
	@cmd:  command (converted to str)

	Returns 1 when done, False on error.
	'''
	cmd = str(cmd)
	# Start communicating
	try:
		# Bound the total number of attempts.  Fix: the original only
		# counted attempts that returned data, so a peer that kept
		# failing made this loop forever.
		for attempt in range(11):
			# Random backoff below one second between attempts.
			time.sleep(round(float(random.randrange(0, 10000, 1))/10000,4))
			data = socketSend(host,port,cmd,type='result')

			if data is False:
				continue

			if decode(data) == 'result_send_ok':
				break

	except Exception as e:
		save_log('ERROR','RproSocket error:'+ str(e))
		return False

	return 1
Пример #9
0
    def OnDoubleClick(self, event):
        """Copy the password of the selected service to the clipboard."""
        selected = self.tree.focus()
        service = self.tree.item(selected, "values")[0]
        # Passwords are stored encoded; decode before copying.
        pyperclip.copy(encode.decode(self.data[service][1]))
Пример #10
0
    def OnDoubleClick(self, event):
        """Double-click handler: put the decoded password on the clipboard."""
        focused = self.tree.focus()
        values = self.tree.item(focused, "values")
        service = values[0]
        secret = self.data[service][1]
        # Stored credentials are encoded at rest.
        secret = encode.decode(secret)
        pyperclip.copy(secret)
Пример #11
0
def proSocket(host, port, cmd, type=None):
    '''
    Send *cmd* to *host*:*port* through socketSend and return the decoded
    reply, or False when every attempt fails.

    NOTE(review): the *type* parameter is accepted but never forwarded --
    socketSend is always called with type=None.  Confirm this is intended.
    '''
    data = ''
    try:
        # Up to 8 tries to get any reply at all (same budget as the
        # original's counter loop).
        for _ in range(8):
            data = socketSend(host, port, cmd, type=None)
            if data:
                break

        try:
            data = decode(data)

        except Exception:
            # Reply would not decode -- wait a little longer, then retry
            # the whole exchange (up to 5 more attempts).
            time.sleep(SOCKET['DELAY'] + 1)

            for _ in range(5):
                data = socketSend(host, port, cmd, type=None)
                if data:
                    break

            try:
                data = decode(data)
            except Exception as e:
                save_log('ERROR', 'recv decode error:' + str(e) + str(host))
                return False
    except Exception as e:
        save_log(
            'ERROR', 'ip: ' + host + ' , port: ' + str(port) +
            ' , proSocket other error: ' + str(e))
        return False

    return data
Пример #12
0
def proSocket(host, port, cmd, type=None):
	'''
	Send *cmd* to *host*:*port* through socketSend and return the decoded
	reply, or False when every attempt fails.

	Fix: the retry loop inside the except handler mixed tabs and spaces,
	which is a TabError under Python 3; indentation is now consistent.
	Logic is otherwise unchanged.
	'''
	data = ''
	try:
		i = 0
		while True:
			data = socketSend(host, port, cmd, type=None)
			if data or i > 6:
				break
			i = i + 1

		try:
			data = decode(data)
		except Exception as e:
			# The reply did not decode; wait a bit longer and retry the
			# whole exchange a few more times.
			time.sleep(SOCKET['DELAY'] + 1)

			i = 0
			while True:
				data = socketSend(host, port, cmd, type=None)
				if data or i > 3:
					break
				i = i + 1

			try:
				data = decode(data)
			except Exception as e:
				save_log('ERROR', 'recv decode error:' + str(e) + str(host))
				return False
	except Exception as e:
		save_log('ERROR', 'ip: ' + host + ' , port: ' + str(port) + ' , proSocket other error: ' + str(e))
		return False

	return data
Пример #13
0
    def from_file(filename='db.db'):
        """Reconstruct a Tree from *filename* by locating its footer.

        Probes backwards from the end of the file, one byte further each
        try, until a record decodes as a footer; then decodes the tree at
        the offset the footer points to.
        """
        with open(filename, 'br') as db:
            tree = Tree(filename=filename)
            file_size = os.path.getsize(filename)

            tree_pos = 0
            for back in range(file_size):
                # Position relative to the end of the file.
                db.seek(-back, 2)
                try:
                    candidate = decode(db, tree)
                    if 'type' in candidate and candidate['type'] == 'footer':
                        tree_pos = candidate['tree']
                        break
                except Exception:
                    pass

            db.seek(tree_pos)
            return decode(db, tree)
Пример #14
0
def parse_rep():
    """Scrape the House members table from Wikipedia into a reps mapping."""
    reps = {
        'wp': {},
        'names': {},
        'links': {},
    }
    page = cache.cachewp(
        'http://en.wikipedia.org/wiki/Current_members_of_the_United_States_House_of_Representatives?printable=yes'
    )

    parser = lxml.etree.HTMLParser(encoding="utf-8")
    doc = lxml.etree.HTML(page, parser=parser)

    member_table = doc.xpath("//table")[1]
    for row in member_table.xpath("//tr"):
        cells = row.xpath("td")
        if len(cells) != 10:
            continue

        district_cell = cells[1]
        name_cell = cells[3]

        name_link = ""
        name_text = ""
        district_link = ""
        for anchor in name_cell.xpath("span/span/a"):
            name_link = anchor.get("href")
            name_text = anchor.text
        for anchor in district_cell.xpath("span/span/a"):
            district_link = anchor.get("href")

        record = {
            'links': {
                'homepage': {}
            },
            'link': name_link,
            'district': district_link,
            'name': name_text
        }
        # Key is the URL-decoded last path segment of the member's link.
        key = re.search("/([^\/]+)$", name_link).group(1)
        key = urllib.unquote(key)
        key = encode.decode(key)
        # Collect all the links and point them at the record object.
        reps['wp'][key] = wiki.parse_wiki_page(name_link, reps, record)
        reps['names'][name_text] = record

    return reps
def parse_rep() :
    """Parse the current House membership table into the reps structure."""
    reps = {'wp': {}, 'names': {}, 'links': {}}
    html_text = cache.cachewp ('http://en.wikipedia.org/wiki/Current_members_of_the_United_States_House_of_Representatives?printable=yes')

    tree = lxml.etree.HTML(html_text, parser=lxml.etree.HTMLParser(encoding="utf-8"))

    table = tree.xpath("//table")[1]
    for tr in table.xpath("//tr"):
        tds = tr.xpath("td")
        if len(tds) == 10:
            member_href = ""
            member_name = ""
            district_href = ""
            # Name cell is column 3, district cell is column 1.
            for a in tds[3].xpath("span/span/a"):
                member_href = a.get("href")
                member_name = a.text
            for a in tds[1].xpath("span/span/a"):
                district_href = a.get("href")
            entry = {
                'links': {'homepage': {}},
                'link': member_href,
                'district': district_href,
                'name': member_name,
            }
            # Wiki key: url-unquoted, re-encoded final path component.
            slug = encode.decode(urllib.unquote(re.search("/([^\/]+)$", member_href).group(1)))
            # Collect all the links and point them at the entry object.
            reps['wp'][slug] = wiki.parse_wiki_page(member_href, reps, entry)
            reps['names'][member_name] = entry

    return reps
Пример #16
0
    def _load(self):
        """
        Load the node stored at self.offset from disk.

        The record length is unknown, so an ever-larger window is read
        from the offset until it decodes and passes the integrity check.
        Returns a Node or Leaf reconstructed from the decoded dict, or
        None for an unknown record type.
        """
        with open(self.tree.filename, "br") as f:
            length = 0
            while True:
                f.seek(self.offset)
                data = f.read(length)
                try:
                    node_dict = decode(check_integrity(data))
                    break
                except Exception:
                    length += 1

        if node_dict[b"type"] == b"Node":
            new_node = Node(tree=self.tree)
            # Children are stored as file offsets; wrap them lazily.
            for key, value in node_dict[b"entries"].items():
                new_node.bucket[key.decode("utf-8")] = LazyNode(offset=value, tree=self.tree)

            if b"rest" in node_dict:
                new_node.rest = LazyNode(offset=node_dict[b"rest"], tree=self.tree)

            return new_node

        if node_dict[b"type"] == b"Leaf":
            new_leaf = Leaf(tree=self.tree)
            for key, value in node_dict[b"entries"].items():
                new_leaf.bucket[key.decode("utf-8")] = value

            return new_leaf

        # Unknown record type (original fell through to an implicit None).
        return None
Пример #17
0
def parse():
    """Scrape the current US Senators table into a reps mapping."""
    reps = {
        'wp': {},
        'names': {},
        'links': {},
    }
    page = cache.cachewp(
        'http://en.wikipedia.org/wiki/List_of_current_United_States_Senators?printable=yes'
    )
    doc = lxml.html.document_fromstring(page)
    senators_table = doc.xpath("//table")[1]
    for row in senators_table.xpath("//tr"):
        cells = row.xpath("td")
        if len(cells) <= 7:
            continue

        state_cell = cells[1]
        class_cell = cells[2]
        name_cell = cells[4]

        # First link in the name cell is the senator's wiki page.
        (name_element, skip, name_link, skip) = name_cell.iterlinks().next()
        record = {
            'type': 'senate',
            'links': {
                'homepage': {}
            },
            'link': name_link,
            'state': state_cell.text,
            'district': class_cell.text,
            'name': name_element.text
        }

        key = re.search("/([^\/]+)$", name_link).group(1)
        key = urllib.unquote(key)
        key = encode.decode(key)
        # Collect all the links and point them at the record object.
        record = wiki.parse_wiki_page(name_link, reps, record)
        reps['wp'][key] = record
        reps['names'][name_element.text] = record

    return reps
def parse() :
    """Parse the list of current United States Senators from Wikipedia."""
    reps = {'wp': {}, 'names': {}, 'links': {}}
    html_doc = lxml.html.document_fromstring(
        cache.cachewp ('http://en.wikipedia.org/wiki/List_of_current_United_States_Senators?printable=yes'))
    table = html_doc.xpath("//table")[1]
    for tr in table.xpath("//tr"):
        tds = tr.xpath("td")
        if len(tds) > 7:
            # iterlinks() yields (element, attr, link, pos); take the
            # first link of the name cell (column 4).
            (anchor, _attr, wiki_href, _pos) = tds[4].iterlinks().next()
            senator = {
                'type': 'senate',
                'links': {'homepage': {}},
                'link': wiki_href,
                'state': tds[1].text,
                'district': tds[2].text,
                'name': anchor.text,
            }

            # Wiki key: url-unquoted, re-encoded final path component.
            slug = encode.decode(urllib.unquote(re.search("/([^\/]+)$", wiki_href).group(1)))

            # Collect all the links and point them at the senator object.
            senator = wiki.parse_wiki_page(wiki_href, reps, senator)
            reps['wp'][slug] = senator
            reps['names'][anchor.text] = senator

    return reps
Пример #19
0
    def _load(self):
        """
        Load the node from disk.

        Seeks to this node's offset and decodes it into self.node, then
        marks it lazy.  Raises when the offset is unset or decoding
        fails.  Simplification: the original caught the exception, closed
        the file, and re-raised afterwards -- `with` already guarantees
        the file is closed before the exception reaches the caller, so
        the capture-and-re-raise dance is unnecessary.
        """
        if self.offset is None:
            # Loading a node without an offset should never happen.
            raise Exception

        with open(self.tree.filename, 'rb') as db:
            db.seek(self.offset)
            self.node = decode(db, self.tree)
            self.set_lazy()
Пример #20
0
def retrieve_url(db, mini_url, device):
    """
    Decodes the mini url to its id, then runs a query returning the appropriate target url.
    The query returns the device specific target url if available or the default url if not
    :param db: database connection
    :param mini_url: the mini url
    :param device: one of 'mobile', 'tablet' or 'default'
    :return: target url for redirect, None if mini_url not found in db
    """
    cursor = db.cursor()
    try:
        url_id = decode(mini_url)
    except ValueError:
        # Not a well-formed mini url.
        return None

    cursor.execute(QUERY_RETRIEVE_TARGET, (url_id, device))
    row = cursor.fetchone()
    if row is None:
        return None

    # Record the hit before handing back the target.
    cursor.execute(QUERY_UPDATE_HITS, (url_id, row[1]))
    return row[0]
Пример #21
0
def oldproSocket(host, port, cmd, type=None):
    '''
    Legacy single-shot exchange: connect, send *cmd* (plus optional
    *type* suffix) and return the decoded reply.

    @host: host IP
    @port: host port
    @cmd:  command string

    Returns '' on timeout or connection failure.  Fixes vs. original:
    the success path now returns the received data (it previously fell
    off the end and returned None), the Python2-only
    `except socket.error, args` syntax is replaced with the portable
    `as` form, and the unreachable sys.exit() calls after the returns
    were removed.
    '''
    # Result returned by the command.
    data = ''

    # Start communicating.
    try:
        sock = socket.socket()
        sock.settimeout(SOCKET['PROTIMEOUT'])
        sock.connect((host, port))

        if type:
            sock.send(encode(cmd + type))
        else:
            sock.send(encode(cmd))

        data = decode(sock.recv(SOCKET['BUFSIZE']))
        sock.close()

    except socket.timeout:
        # NOTE(review): sending on a socket that just timed out may
        # itself fail; kept for behavioural compatibility.
        sock.send(encode('Time out!'))
        save_log('WARNING', 'host:' + host + ' Time out!')
        sock.close()
        return data

    except socket.error as err:
        (error_no, msg) = err.args
        error_log = 'Connect server faild:%s, error_no=%d ,error_host=%s' % (
            msg, error_no, host)
        save_log('ERROR', error_log)
        sock.close()
        return data

    return data
Пример #22
0
def oldproSocket(host, port, cmd, type=None):
	'''
	Legacy single-shot exchange: connect, send *cmd* (plus optional
	*type* suffix) and return the decoded reply.

	@host: host IP
	@port: host port
	@cmd:  command string

	Returns '' on timeout or connection failure.  Fixes vs. original:
	the success path now returns the received data (it previously fell
	off the end and returned None), the Python2-only
	`except socket.error, args` syntax is replaced with the portable
	`as` form, and the unreachable sys.exit() calls were removed.
	'''
	# Result returned by the command.
	data = ''

	# Start communicating.
	try:
		sock = socket.socket()
		sock.settimeout(SOCKET['PROTIMEOUT'])
		sock.connect((host, port))

		if type:
			sock.send(encode(cmd + type))
		else:
			sock.send(encode(cmd))

		data = decode(sock.recv(SOCKET['BUFSIZE']))
		sock.close()

	except socket.timeout:
		# NOTE(review): sending on a socket that just timed out may
		# itself fail; kept for behavioural compatibility.
		sock.send(encode('Time out!'))
		save_log('WARNING','host:'+host+' Time out!')
		sock.close()
		return data

	except socket.error as err:
		(error_no, msg) = err.args
		error_log = 'Connect server faild:%s, error_no=%d ,error_host=%s' % (msg, error_no,host)
		save_log('ERROR',error_log)
		sock.close()
		return data

	return data
Пример #23
0
def get_shipy_state(state, h_tot, steps, tot_turns = 400, map_size = 7):
    """Build the shipyard feature vector [N, t_left, h_tot, near_ships].

    h_tot is the halite available.  Assumes state[:,:,1] marks ship
    positions and state[:,:,3] the shipyard (one-hot) -- confirm against
    the encoder in `cod`.
    """
    # Number of ships on the map and turns left in the episode.
    n_ships = np.count_nonzero(state[:,:,1])
    turns_left = tot_turns - steps

    # Decode the shipyard position and its four neighbours.
    yard = cod.decode(cod.one_to_index(state[:,:,3], map_size), map_size)
    cells = [yard, yard + [1,0], yard + [-1,0], yard + [0,1], yard + [0,-1]]

    mask = np.zeros((map_size,map_size)).astype(int)
    for cell in cells:
        mask[tuple(cell)] = 1
    mask = mask.astype(bool)

    # Ships that can reach the shipyard in a single move.
    near_ships = state[:,:,1][mask].sum()

    return np.array([n_ships, turns_left, h_tot, near_ships])
Пример #24
0
    def read_piece(ty):
        """Decode one piece of type *ty* from *inp*, restoring its lazy parts."""
        tree = encode.decode(types, m, ty, inp)

        # Read the table of lazy-part sizes and convert it to absolute
        # file offsets (cumulative sizes shifted by the current position).
        # TODO: We don't need this; it is implicit in the tree we just read.
        lazy_offsets = [0]
        for _ in range(bits.read_varint(inp)):
            lazy_offsets.append(lazy_offsets[-1] + bits.read_varint(inp))
        here = inp.tell()
        lazy_offsets = [offset + here for offset in lazy_offsets]

        def restore_lazy_part(ty, attr, index):
            # Jump to the part, decode it recursively, and check we
            # consumed exactly up to the next part's offset.
            inp.seek(lazy_offsets[index])
            part = read_piece(attr.resolved_ty)
            assert inp.tell() == lazy_offsets[
                index + 1], f'{inp.tell()}, {lazy_offsets[index + 1]}'
            return part

        tree = lazy.LazyMemberRestorer(types, restore_lazy_part).replace(ty, tree)
        inp.seek(lazy_offsets[-1])
        return tree
Пример #25
0
def foo(link):
    """Return the normalised wiki key for *link*: its decoded last path segment."""
    unquoted = urllib.unquote(link)
    segment = re.search("/([^\/]+)$", unquoted).group(1)
    return encode.decode(segment)
Пример #26
0
def receive_message(host, port):
	"""Receive one UDP packet and return (covert_message, message)."""
	receiver = udp.UDP(host, port)
	encoded_values, message = receiver.receive()
	# The covert payload is carried in the encoded header values.
	return encode.decode(encoded_values), message
from encode import encode,decode
import signature

# Demo/self-test for the signature module (Python 2 print syntax):
# generate a key, sign some data, dump the encoded key material, then
# verify the signature with both legitimate and tampered data.

# This is the test data to sign
data = 'test signed data'

# Generate a PyCrypto RSA key using signature module
key = signature.generate_key()

# Sign the test data with the key
sig = signature.sign(key, data)

# Dump encoded versions of the keys and data to the console
print 'private', signature.key_to_string(key)
print 'public', signature.key_to_string(key.publickey())
print 'address', signature.public_key_to_address(key.publickey())
print 'data', data
print 'signature', sig, '\n'

# Test verification code: the first call should verify, the second uses
# tampered data and must fail.
print 'call to verify() with legit data\t\t\t%s\t(Should be True)' % signature.verify(key, sig, data)
print 'call of verify() with tampered data\t\t\t%s\t(Should be False)' % key.verify(data+'asdf',(decode(encode(sig),type='long'),None))
Пример #28
0
def foo(link):
    """Normalise a wiki href into its decoded final path component."""
    link = urllib.unquote(link)
    match = re.search("/([^\/]+)$", link)
    link = encode.decode(match.group(1))
    return link
Пример #29
0
def mainExtractProcess(fig='lena.modified.png'):
    """Extract the message embedded in *fig* by the companion embed step.

    Recovers the embedding parameters (rhoT, lastEc, La, N, tagsCode)
    from the LSBs of the image border, then walks the selected pixels in
    reverse, undoing the prediction-error expansion to rebuild the
    message bits.  Writes the restored image next to *fig* and returns
    the decoded message string.
    """
    mfig = fig
    imgRcv = cv2.imdecode(numpy.fromfile(mfig, dtype=numpy.uint8), -1)
    imgRcv = cv2.cvtColor(imgRcv, cv2.COLOR_BGR2RGB)
    grayRcv = cvtGray(imgRcv)
    predictRcv, pErrorRcv, rhoRcv = PEs(grayRcv, imgRcv)
    # Extract the parameters stored in the image border (the set
    # difference keeps only the 1-pixel outer frame coordinates).
    border = sorted(
        list(
            set(map(tuple, np.argwhere(grayRcv == grayRcv))) - set(
                map(
                    tuple,
                    np.argwhere(grayRcv[1:-1, 1:-1] == grayRcv[1:-1, 1:-1]) +
                    1))))
    border = [
        str(imgRcv[loc][2] % 2)
        for loc in filter(lambda xy: invariant(imgRcv[xy]), border)
    ]
    # Fixed-width bit fields packed into the border LSBs.
    rhoT = int(''.join(border[:16]), 2)
    lastEc = int(''.join(border[16:24]), 2)
    La = int(''.join(border[24:40]), 2)
    N = int(''.join(border[40:72]), 2)
    selected = [tuple(n + 2) for n in np.argwhere(rhoRcv[2:-2, 2:-2] < rhoT)]
    tagsCode = [
        imgRcv[value][2] % 2
        for value in filter(lambda xy: invariant(imgRcv[xy]), selected[N:])
    ][:La] if La != 1 else [0] * N
    print(
        f'=> Finish extractig parameters:\n\trhoT: {rhoT}, lastEc: {lastEc}, La: {La}, N: {N}, tagsCode: {"".join([str(i) for i in tagsCode])}'
    )
    # Extract the embedded message according to the recovered parameters
    # (pixels tagged 0 carry payload; processed in reverse embed order).
    candidate = reversed([
        selected[:N][index] for index, value in enumerate(tagsCode)
        if value == 0
    ])
    predictRcv = imgRcv.copy().astype(np.int32)

    pErrorRcv = np.zeros(imgRcv.shape)
    msgRcv = ''
    for i in candidate:
        # Predict R and B channels from the 3 causal neighbours via a
        # quadratic fit on the gray values.
        rM = np.array([
            imgRcv[i[0] + 1, i[1], 0], imgRcv[i[0], i[1] + 1, 0],
            imgRcv[i[0] + 1, i[1] + 1, 0]
        ]).reshape(3, 1)
        bM = np.array([
            imgRcv[i[0] + 1, i[1], 2], imgRcv[i[0], i[1] + 1, 2],
            imgRcv[i[0] + 1, i[1] + 1, 2]
        ]).reshape(3, 1)
        grM = np.array([
            grayRcv[i[0] + 1, i[1]], grayRcv[i[0], i[1] + 1], grayRcv[i[0] + 1,
                                                                      i[1] + 1]
        ]).reshape(3, 1)
        X = np.mat(np.column_stack(([1] * 3, grM, grM**2)))
        predictRcv[i][0] = predictV(rM, grayRcv[i], X)
        predictRcv[i][2] = predictV(bM, grayRcv[i], X)
        pErrorRcv[i] = imgRcv[i] - predictRcv[i]

        # The message bit is the parity of the R-channel prediction error.
        msgRcv += str(int(pErrorRcv[i][0]) % 2)

        nextEc = pErrorRcv[i][2] % 2
        pErrorRcv[i] = pErrorRcv[i] // 2
        imgRcv[i] = predictRcv[i] + pErrorRcv[i]
        # Restore G so the weighted RGB sum reproduces the gray value.
        imgRcv[i][1] = np.round(
            (grayRcv[i] - imgRcv[i][0] * RGB[0] - imgRcv[i][2] * RGB[2]) /
            RGB[1])
        if lastEc != 0:
            # Try the +/- correction carried over from the previous pixel.
            if np.round(
                    np.array(
                        [imgRcv[i][0], imgRcv[i][1] + lastEc,
                         imgRcv[i][2]]).dot(RGB)) == grayRcv[i]:
                imgRcv[i][1] += lastEc
                print(i)
            elif np.round(
                    np.array(
                        [imgRcv[i][0], imgRcv[i][1] - lastEc,
                         imgRcv[i][2]]).dot(RGB)) == grayRcv[i]:
                # else:
                imgRcv[i][1] -= lastEc
                print(i)

        else:
            if np.round(
                    np.array([imgRcv[i][0], imgRcv[i][1], imgRcv[i][2]
                              ]).dot(RGB)) != grayRcv[i]:
                print(f"index {i} has no matched ec")
        lastEc = abs(nextEc)
    imgRcv = cv2.cvtColor(imgRcv, cv2.COLOR_RGB2BGR)
    mfig = '.'.join(fig.split('.')[:-1] + ['extracted'] +
                    fig.split('.')[-1:])  # lena.modified.png
    cv2.imencode('.png', imgRcv)[1].tofile(mfig)
    # Bits were collected in reverse embed order, hence the [::-1].
    print(f"=> Finish extracting received msg: {decode(msgRcv[::-1])}")
    # print(f"=> The msg is equal to received msg: {msg == decode(msgRcv[::-1])}")
    return decode(msgRcv[::-1])
Пример #30
0
# Ensure the working directory tree exists before anything writes to it.
if not os.path.exists(base_folder):
    mkdir_recursive(base_folder)

# Derived paths under the base folder.
db_path = os.path.join(base_folder, db_name)
raw_data_folder_path = os.path.join(base_folder, raw_data_folder)
reduced_data_folder_path = os.path.join(base_folder, reduced_data_folder)
log_file_path = os.path.join(base_folder, data_log_file)

# Collect one decoded (username, password) pair per login config file.
username = []
password = []
for login_config_path in login_config:

    with open(login_config_path) as login_f:

        # NOTE(review): this rebinds `login_config` while the loop is
        # iterating it; it works because the iterator was created from
        # the original list, but the shadowing is fragile.
        login_config = json.load(login_f)
        username.append(decode(key, login_config['username']))
        password.append(decode(key, login_config['password']))

# Prepare the log format for ingestion
obslog_column_key = ('utc', 'name', 'propid', 'ra', 'dec', 'airmass',
                     'instrument', 'filter', 'binning', 'grating', 'exptime',
                     'seeing', 'sky', 'filename', 'groupid', 'err', 'qa')
data_column_key = ('filename', 'url', 'localpath', 'frametype')
reduction_column_key = ('filename', 'arc', 'sensitivity', 'stacked', 'output')

# Set-up logger; map the configured level name onto logging's constants.
logger = logging.getLogger()
if data_log_level == "CRITICAL":
    logging.basicConfig(level=logging.CRITICAL)
if data_log_level == "ERROR":
    logging.basicConfig(level=logging.ERROR)
Пример #31
0
    except Exception as e:
        print('This script should not be run interactively for deployment. '
              'log level is set to DEBUG.')
        print(e)
        log_level = 'DEBUG'
        logging.basicConfig(level=logging.DEBUG)
        base_folder = os.path.join(os.path.abspath(os.getcwd()), base_folder)

# Derived paths under the base folder.
db_path = os.path.join(base_folder, db_name)
raw_data_folder_path = os.path.join(base_folder, raw_data_folder)
reduced_data_folder_path = os.path.join(base_folder, reduced_data_folder)
log_file_path = os.path.join(base_folder, reduction_log_file)

# Slack credentials are stored encoded; decode them with the shared key.
with open(slack_config) as slack_f:
    slack_cfg = json.load(slack_f)
    webhook_url = decode(key, slack_cfg['webhook_url'])
    slack_token = decode(key, slack_cfg['token'])

# Configure to log into file
fh = logging.FileHandler(log_file_path, 'a+')
fh.setFormatter(formatter)
logger.addHandler(fh)

# Connect to the database
db_connector = sqlite3.connect(db_path)
logging.info('Connected to the {}.'.format(db_name))

# Create the db table if not exist (one TEXT column per reduction key;
# the [:-2] strips the trailing ", " before closing the statement).
check_reduction_exist_query = 'CREATE TABLE IF NOT EXISTS "{}" ({}'.format(
    reduction_table, ''.join(
        ['"' + i + '" TEXT, ' for i in reduction_column_key]))[:-2] + ');'
Пример #32
0
def verify(key, signature, data):
    """Check *signature* over *data* with *key*; truthy when valid."""
    decoded_sig = decode(signature, type='long')
    return key.verify(data, (decoded_sig, None))