Example #1
	def from_JSON(self, data):
		# Restore this object's state from a parsed JSON dict,
		# skipping any keys that are absent.
		if "_destination_movie" in data:
			self.destination_movie = data["_destination_movie"]

		if "_snapshot_timestamp" in data:
			self.snapshot_timestamp = data["_snapshot_timestamp"]

		if "_snapshot_name" in data:
			self._snapshot_name = data["_snapshot_name"]

		if "_output_path_prefix" in data:
			self._output_path_prefix = data["_output_path_prefix"]

		if "_text_objects" in data:
			for text_object in data["_text_objects"]:
				drawtext = DrawText()
				drawtext.from_JSON(text_object)
				self._text_objects.append(drawtext)

		if "_image_objects" in data:
			for image_object in data["_image_objects"]:
				image = DrawImage()
				image.from_JSON(image_object)
				self._image_objects.append(image)

		if "_encoders" in data:
			for encoder_object in data["_encoders"]:
				encoder = Encoder()
				encoder.from_JSON(encoder_object)
				self._encoders.append(copy.copy(encoder))  # needs `import copy`
Example #2
 def encodeRaw(inputStream, outputStream, intervalLength, options):
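     # validate the requested interval, then drive the encode loop over the two streams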
     Coder.checkInterval(intervalLength)
     encoder = Encoder(inputStream, outputStream, options)
     Coder.doEncode(encoder, intervalLength)
Example #3
print("dec_self_attention_weights.shape:", dec_self_attention_weights.shape)
print(20 * '-')
print("dec_enc_attention_weights.shape:", dec_enc_attention_weights.shape)
print(100 * '-')

# test Encoder
print("Encoder:\n")
# hyperparameters
num_layers = 2  # a 2-layer Encoder
d_model = 4
num_heads = 2
dff = 8
input_vocab_size = vocab_size_en  # remember to account for <start> and <end>

# instantiate an Encoder
encoder = Encoder(num_layers, d_model, num_heads, dff, input_vocab_size)

# feed the 2-D index sequence into the Encoder to encode it
enc_out = encoder(en, training=False, mask=en_padding_mask)
print("en:", en)
print("-" * 20)
print("enc_out:", enc_out)
print(100 * '-')

# test Decoder
print("Decoder:\n")
# hyperparameters
num_layers = 2  # a 2-layer Decoder
d_model = 4
num_heads = 2
dff = 8
Example #4
def runPacket_1(experimentSpec):
    # This version does not transform the pass-sending method (pass by pass)
    # some constants:
    packetLen = experimentSpec['packetLen']
    k = experimentSpec['spinal']['k']
    c = experimentSpec['map']['bitsPerSymbol']
    precision = experimentSpec['map']['precisionBits']
    B = experimentSpec['spinal']['B']
    d = experimentSpec['spinal']['d']
    SNR_dB = experimentSpec['channel']['SNR_dB']
    passNum = experimentSpec['protocol']['passNum']
    addEqu = 0
    tail = experimentSpec['spinal']['numLastCodeStep']
    # Message to be encoded:
    message = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range((packetLen+7)//8))
    #pdb.set_trace();

    channelCof = experimentSpec['channel']['type']
    # initialize random number generator. this seed is fixed constant in order
    # to get deterministic results in this example.
    channelLen = len(channelCof)
    delay = experimentSpec['delay']
    channel_power = 0
    for i in range(channelLen):
        channel_power += channelCof[i]**2
    channel_dev = math.sqrt(channel_power)
    channelCof = [ch/channel_dev for ch in channelCof]

    mapper = SymbolMapper(c, precision)
    map_func = lambda value: mapper.map(value)
    
    #Instantiate an encoder
    #print 'Message:', message
    #print 'Message hex:', message.encode("hex")
    encoder = Encoder(k, map_func, message, packetLen)
    
    # spine length
    n = packetLen
    spine_length = (n + (k - 1)) // k  # integer ceil-division
    symbols = []
    # encode  message, add the tail
    for p in range(passNum):
        for i in range(spine_length):
            symbols.append(encoder.get_symbol(i))
        for t in range(tail):
            symbols.append(encoder.get_symbol(spine_length-1))
    #print symbols
    #make sure we got the expected result
    
    # get average signal power
    codeRate = passNum/k
    signal_power = mapper.get_signal_average_power()
    SNR_ratio = math.pow(10.0, SNR_dB/10.0)
    noise_power = 2.0*channel_power*signal_power/ SNR_ratio
    noise_std_dev = math.sqrt(noise_power/2)
    

    
    # add white gaussian noise at 10dB to signal
    #print "Adding white gaussian noise at 10dB."
    symbols = np.convolve(symbols, channelCof)

    noisy_symbols_all = [sym + random.gauss(0, noise_std_dev) for sym in symbols]
    noisy_symbols = noisy_symbols_all[delay:-channelLen+delay+1]

    # round to closest integer
    noisy_symbols = [int(x + 0.5) for x in noisy_symbols]
    #print "noisy symbols:", noisy_symbols
    
    # instantiate decoder
    decoder = Decoder(k, B, d, channelLen-delay, passNum, map_func)
    
    # update decoder with gathered points
    for i in range(spine_length):
        symbols_in = []
        if i == spine_length-1:
            t = tail
            for ii in range(passNum):
                for t in range(tail+1):
                    symbols_in.append(noisy_symbols[i+t+ii*(spine_length+tail)])
        else:
            t = 0
            for ii in range(passNum):
                symbols_in.append(noisy_symbols[i+ii*(spine_length+tail)])
        if addEqu:
            decoder.advance_fading(symbols_in, channelCof, t)
        else:
            decoder.advance(symbols_in)
    
    results = decoder.get_most_likely()
    error,totalBits = errorStatistics(results,message,packetLen)
    print(error, totalBits)
    return error, totalBits
Example #5
 def encode(self, string):
     self.set_string(string)  # receive the input string
     encoder = Encoder(self._string,
                       self._probrangedict)  # create an Encoder
     # return the binary tag number produced by the encoder for the string
     return encoder.encode()
Example #6
 def __init__(self, word_size_encoder, emb_dim, hidden_size,
              word_size_decoder, vector_size):
     super(seq2seq, self).__init__()
     self.encoder = Encoder(vector_size, word_size_encoder, emb_dim,
                            hidden_size)
     self.decoder = DecoderRNN(hidden_size, word_size_decoder, vector_size)
Example #7
	def __init__(self, output, translate, log, installer, options):
		#- necessary imports
		import sys, os, configparser
		from teco import color, style
		sys.path.append('modules/usb-rubber-ducky-manager/Encoder')
		from Encoder import Encoder

		self.directories = []
		self.files = []
		self.dir_before = []
		#- Operations
		#- Example:
		output.default('Usb Rubber Ducky Manager')
		def __menu__(path=''):
			root_path = 'modules/usb-rubber-ducky-manager/apps/'
			self.directories, self.files = list_dir_file(root_path+path)
			c = 1
			for d in self.directories:
				print(color('magenta', str(c)+" - "+d))
				c += 1
			for f in self.files:
				print(color('azul', str(c)+" - "+f))
				c += 1

		def list_dir_file(root_path):
			files = []
			directories = []
			for f in os.listdir(root_path):
				check = root_path+"/"+f
				if os.path.isfile(check):
					files.append(f)
				elif os.path.isdir(check):
					directories.append(f)
			return directories, files

		def option1():
			output.default('You selected option 1')

		def option2():
			output.default('You selected option 2')

		__menu__()

		control = True
		while control:
			options.set_completer(help.complete)
			cadena = ""
			for x in self.dir_before:
				cadena = cadena+" "+x
			sentencia = input("Usb Rubber Ducky Manager "+cadena+" >> ")
			if sentencia == 'exit':
				sys.exit()
			elif sentencia == 'version':
				output.default(help.version())
			elif sentencia == 'menu' or sentencia == 'ls':
				dir_extend = ""
				for d in self.dir_before:
					dir_extend = dir_extend + d + "/"
				__menu__(dir_extend)
			elif sentencia == 'help':
				output.default(help.help())
			elif int(sentencia) == 0:
				dir_extend = ""
				if len(self.dir_before) != 0:
					self.dir_before.remove(self.dir_before[-1])
					for j in self.dir_before:
						dir_extend = dir_extend + j + "/"
				__menu__(dir_extend)
			elif (int(sentencia) <= len(self.directories)) and (int(sentencia) > 0):
				self.dir_before.append(self.directories[int(sentencia)-1])
				dir_extend = ""
				for d in self.dir_before:
					dir_extend = dir_extend + d + "/"
				__menu__(dir_extend)
			elif (int(sentencia) > len(self.directories)) and (int(sentencia) <= (len(self.directories)+len(self.files))):
				dir_extend = ""
				for d in self.dir_before:
					dir_extend = dir_extend + d + "/"
				print('modules/usb-rubber-ducky-manager/apps/'+dir_extend+self.files[int(sentencia)-len(self.directories)-1])
				duck_path = 'modules/usb-rubber-ducky-manager/apps/'+dir_extend+self.files[int(sentencia)-len(self.directories)-1]
				config = configparser.ConfigParser()
				if not config.read(duck_path):
					output.default("The installation file does not exist")
					sys.exit()
				else:
					print(color('magenta', "1 - Information Script"))
					print(color('azul', "2 - Compile Script"))
					option = input("Usb Rubber Ducky Manager >> ")
					while option == "":
						option = input("Usb Rubber Ducky Manager >> ")
					if option == "1":
						title = config.get('info', 'title')
						description = config.get('info', 'description')
						print(color('rojo', "Title:"))
						print(title)
						print(color('rojo', "Description:"))
						print(description)
					elif option == "2":
						delimiter = config.get('delimiter', 'key')
						duck_script = config.get('script_command', 'cuak')
						data = config.items('data')
						for x in data:
							new_string = input("usb-rubber-ducky-manager - "+x[1]+" >> ")
							old_string = delimiter+x[0]+delimiter
							duck_script = duck_script.replace(old_string, new_string)
						print(duck_script)
						commands = duck_script.split('\n')
						path = 'modules/usb-rubber-ducky-manager/precompile/apps/'+dir_extend
						path_compile = 'modules/usb-rubber-ducky-manager/compile/apps/'+dir_extend
						file_save = path+self.files[int(sentencia)-len(self.directories)-1]
						file_save_compile = path_compile+self.files[int(sentencia)-len(self.directories)-1].split(".")[0]+".bin"
						if not os.path.exists(path):
							os.makedirs(path)
						if not os.path.exists(path_compile):
							os.makedirs(path_compile)
						log = open(file_save, 'w')
						for x in commands:
							log.write(x)
							log.write("\n")
						log.close()
						encoder_rubber = Encoder()
						encoder_rubber.compile(['encoder', '-i', file_save, '-o', file_save_compile])
					else:
						output.default('You have not selected a valid option')
			else:
				output.default('You have not selected a valid option')
Example #8
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config

        # define the embedding layer
        self.embedding = Embedding(
            config.num_vocab,  # vocabulary size
            config.embedding_size,  # embedding dimension
            config.pad_id,  # pad_id
            config.dropout)

        # affect (emotion) embedding layer
        self.affect_embedding = Embedding(config.num_vocab,
                                          config.affect_embedding_size,
                                          config.pad_id, config.dropout)
        self.affect_embedding.embedding.weight.requires_grad = False

        # post encoder
        self.post_encoder = Encoder(
            config.post_encoder_cell_type,  # RNN type
            config.embedding_size + config.affect_embedding_size,  # input dimension
            config.post_encoder_output_size,  # output dimension
            config.post_encoder_num_layers,  # number of RNN layers
            config.post_encoder_bidirectional,  # whether bidirectional
            config.dropout)  # dropout probability

        # response encoder
        self.response_encoder = Encoder(
            config.response_encoder_cell_type,
            config.embedding_size + config.affect_embedding_size,  # input dimension
            config.response_encoder_output_size,  # output dimension
            config.response_encoder_num_layers,  # number of RNN layers
            config.response_encoder_bidirectional,  # whether bidirectional
            config.dropout)  # dropout probability

        # prior network
        self.prior_net = PriorNet(
            config.post_encoder_output_size,  # post input dimension
            config.latent_size,  # latent variable dimension
            config.dims_prior)  # hidden layer dimensions

        # recognition network
        self.recognize_net = RecognizeNet(
            config.post_encoder_output_size,  # post input dimension
            config.response_encoder_output_size,  # response input dimension
            config.latent_size,  # latent variable dimension
            config.dims_recognize)  # hidden layer dimensions

        # initialize the decoder state
        self.prepare_state = PrepareState(
            config.post_encoder_output_size + config.latent_size,
            config.decoder_cell_type, config.decoder_output_size,
            config.decoder_num_layers)

        # decoder
        self.decoder = Decoder(
            config.decoder_cell_type,  # RNN type
            config.embedding_size + config.affect_embedding_size +
            config.post_encoder_output_size,
            config.decoder_output_size,  # output dimension
            config.decoder_num_layers,  # number of RNN layers
            config.dropout)  # dropout probability

        # bag-of-words prediction
        self.bow_predictor = nn.Sequential(
            nn.Linear(config.post_encoder_output_size + config.latent_size,
                      config.num_vocab), nn.Softmax(-1))

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(config.decoder_output_size, config.num_vocab),
            nn.Softmax(-1))
Example #9
        dec_pos_ids = fluid.layers.data(name="pos_ids", shape=[None, SEQ_MAX_LEN], dtype='int64')
        dec_enc_slf_attn = fluid.layers.data(name='enc_slf_attn', shape=[None, SEQ_MAX_LEN, SEQ_MAX_LEN], dtype='int64')

        # task label
        dec_lm_label_mat = fluid.layers.data(name='lm_label_mat', shape=[None, SEQ_MAX_LEN], dtype='int64')
        dec_lm_pos_mask = fluid.layers.data(name='lm_pos_mask', shape=[None, SEQ_MAX_LEN, SEQ_MAX_LEN], dtype='int64')
        dec_lm_pos_len = fluid.layers.data(name='lm_pos_len', shape=[None, 1], dtype='int64')

        goal_type_pos = fluid.layers.data(name="goal_type_pos", shape=[None, 2], dtype='int64')
        goal_type_label = fluid.layers.data(name="goal_type_label", shape=[None], dtype='int64')

        # enc_dec_mask
        enc_dec_mask = fluid.layers.data(name='enc_dec_mask', shape=[None, SEQ_MAX_LEN, SEQ_MAX_LEN], dtype='int64')

        # network
        encode = Encoder(enc_token_ids, enc_pos_ids, enc_segment_ids,
                         enc_input_length, config)
        enc_output = encode.get_sequence_output()

        decode = Decoder(dec_token_ids, dec_pos_ids, dec_segment_ids,
                         dec_enc_slf_attn, config=config, enc_input=enc_output, enc_input_mask=enc_dec_mask)
        dec_output = decode.get_sequence_output()


        loss, goal_type_acc = decode.pretrain(goal_type_pos, goal_type_label,
                                              dec_lm_label_mat, dec_lm_pos_mask, dec_lm_pos_len)

        # loss
        adam = fluid.optimizer.AdamOptimizer()
        adam.minimize(loss)

    # define executor
Example #10
from Encoder import Encoder

print("Test Encoder")
e1 = Encoder(5, 6)
c = e1.countPulse
print(c)
e1.start()
Example #11
    def __init__(self, config):
        super(Model, self).__init__()

        self.config = config

        # affect (emotion) embedding layer
        self.affect_embedding = AffectEmbedding(config.num_vocab,
                                                config.affect_embedding_size,
                                                config.pad_id)

        # define the embedding layer
        self.embedding = WordEmbedding(
            config.num_vocab,  # vocabulary size
            config.embedding_size,  # embedding dimension
            config.pad_id)  # pad_id

        # affect encoder
        self.affect_encoder = Encoder(
            config.encoder_decoder_cell_type,  # RNN type
            config.affect_embedding_size,  # input dimension
            config.affect_encoder_output_size,  # output dimension
            config.encoder_decoder_num_layers,  # number of layers
            config.encoder_bidirectional,  # whether bidirectional
            config.dropout)

        # encoder
        self.encoder = Encoder(
            config.encoder_decoder_cell_type,  # RNN type
            config.embedding_size,  # input dimension
            config.encoder_decoder_output_size,  # output dimension
            config.encoder_decoder_num_layers,  # number of RNN layers
            config.encoder_bidirectional,  # whether bidirectional
            config.dropout)  # dropout probability

        self.attention = Attention(config.encoder_decoder_output_size,
                                   config.affect_encoder_output_size,
                                   config.attention_type,
                                   config.attention_size)

        self.prepare_state = PrepareState(
            config.encoder_decoder_cell_type,
            config.encoder_decoder_output_size +
            config.affect_encoder_output_size,
            config.encoder_decoder_output_size)

        self.linear_prepare_input = nn.Linear(
            config.embedding_size + config.affect_encoder_output_size +
            config.attention_size, config.decoder_input_size)

        # decoder
        self.decoder = Decoder(
            config.encoder_decoder_cell_type,  # RNN type
            config.decoder_input_size,  # input dimension
            config.encoder_decoder_output_size,  # output dimension
            config.encoder_decoder_num_layers,  # number of RNN layers
            config.dropout)  # dropout probability

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(
                config.encoder_decoder_output_size + config.attention_size,
                config.num_vocab), nn.Softmax(-1))
Example #12
#!/usr/bin/env python
from Encoder import Encoder
import time

encoder = Encoder(38, 40) 
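# assumption: 38 and 40 are the GPIO pin numbers the wheel encoder is connected to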
t = time.time()
while True:
    cycles, dir_flag = encoder.get_cycles()
    if dir_flag:
        t = time.time()
        print("Wheel changed directions!")
        continue
    if cycles != 0:
        dt = time.time()-t
        t = time.time()
        print("Completed cycle in %f seconds" % dt)
Example #13
class Gui():
	root = None # Root View
	_input = None # Input Source
	_output = None # Output Source
	e = None # Encoder
	encodeFinished = False
	progressBarLength = 0 # Length of progressbar
	counter = 0 # filecounter
	def __init__(self):

		# Rootview
		self.root=Tk()

		# Set Title
		self.root.title("MP3 - Encoder - Multithreaded by Masterky")

		# 1 = VBR Preset, 2 = VBR, 3 = Bitrate
		self.options = IntVar()
		self.options.set(1)
		self.optionOld = 0

		self.qualityPreset = IntVar()
		self.qualityPreset.set(1) 

		# Input Label and Button
		self.label_input_text = StringVar()
		self.label_input_text.set("Selected Input: nothing") 
		self.label_input = Label(self.root, fg='blue', textvariable=self.label_input_text)
		self.label_input.pack(fill=X)

		button_input = Button(self.root, command=self.setInput, text="Input Directory", highlightthickness=3)
		button_input.pack(fill=X)
		
		# Output Label and Button
		self.label_output_text = StringVar()
		self.label_output_text.set("Selected Output: nothing") 
		label_output = Label(self.root,fg='blue', textvariable=self.label_output_text)
		label_output.pack(fill=X)
		button_output = Button(self.root, command=self.setOutput, text="Output Directory", highlightthickness=3)
		button_output.pack(fill=X)

		# Encoding
		EncodingLabel = Label(self.root, fg='blue', text="""Choose Encoding Method:""",justify = CENTER, pady=10).pack()

		# Encoding Methods
		Radiobutton(self.root, text="VBR Preset", padx = 20, highlightthickness=3, variable=self.options, value=1).pack(anchor=W) 
		Radiobutton(self.root, text="VBR", padx = 20, highlightthickness=3, variable=self.options, value=2).pack(anchor=W)
		Radiobutton(self.root, text="Bitrate", highlightthickness=3, padx = 20, variable=self.options, value=3).pack(anchor=W)

		PRESET_EXTREM = "extreme"
		PRESET_MEDIUM = "medium"
		PRESET_STANDARD = "standard"
		PRESET_INSANE = "insane"

		# VBR Presets
		self.presets = []

		Label(self.root, text="""VBR Preset:""",justify = CENTER, pady=10).pack()
		self.preset1 = Radiobutton(self.root, text=PRESET_STANDARD, padx = 20,variable=self.qualityPreset, value=0)
		self.preset2 = Radiobutton(self.root, text=PRESET_MEDIUM, padx = 20, variable=self.qualityPreset, value=1)
		self.preset3 = Radiobutton(self.root, text=PRESET_EXTREM, padx = 20, variable=self.qualityPreset, value=2)
		self.preset4 = Radiobutton(self.root, text=PRESET_INSANE, padx = 20, variable=self.qualityPreset, value=3)

		self.preset1.pack(anchor=W)
		self.preset2.pack(anchor=W)
		self.preset3.pack(anchor=W)
		self.preset4.pack(anchor=W)

		self.presets.append(self.preset1)
		self.presets.append(self.preset2)
		self.presets.append(self.preset3)
		self.presets.append(self.preset4)

		# VBR Quality
		Label(self.root, text="""VBR Quality:""",justify = CENTER, pady=10).pack()
		self.vbrQuality = Scale(self.root, from_=0, to_=9, orient=HORIZONTAL)
		self.vbrQuality.set(2)
		self.vbrQuality.pack(fill=X)

		# Bitrate
		Label(self.root, text="""Bitrate:""",justify = CENTER, pady=10).pack()
		self.bitrate = Scale(self.root, from_=32, to_=320, orient=HORIZONTAL)
		self.bitrate.set(120)
		self.bitrate.pack(fill=X)

		# Progressbar
		self.progressLabel = Label(self.root, text="""Progress:""",justify = CENTER, pady=10, bg='yellow')
		self.progressLabel.pack(anchor=CENTER)
		self.progressbar = ttk.Progressbar(self.root,orient=HORIZONTAL, length=0, mode="determinate")
		self.progressbar.pack(fill=X)



		# Encode Button
		self.encodeButton = Button(self.root, text="Encode", command=self.callback)
		self.encodeButton.config(bg = 'yellow')
		self.encodeButton.pack()
		self.root.geometry("480x620")

		# Binding method to gray out
		self.root.bind('<<Handler>>', self.handleOption)

		# Starting thread
		th = threading.Thread(target=self.startHandler)
		th.setDaemon(1)
		th.start()

	def showFileInputError(self):
		tkMessageBox.showerror("Error", "Please select an input and output directory!")
	def setInput(self):
		_dir = tkFileDialog.askdirectory(parent=self.root, title='Select Directory with MP3 Files')
		self._input = _dir
		if isinstance(self._input, str):
			if self._input != "":
				self._input = self.encodeFilePath(self._input)  # keep the normalized path
				print("Number of files: ", self.countFiles(self._input))
			else: pass
		else:
			self._input = ""
		print("Input: ", self._input)

		self.label_input_text.set("Selected Input = " + self._input)
	def encodeFilePath(self, path):
		return path.replace("/", os.sep)
	def setOutput(self):
		_dir = tkFileDialog.askdirectory(parent=self.root, title='Select Directory with MP3 Files')
		self._output = _dir
		if isinstance(self._output, str):
			if self._output != "":
				self._output = self.encodeFilePath(self._output)
			else:
				pass
		print("Output: ", self._output)
		self.label_output_text.set("Selected Output = " + self._output)
	def countFiles(self, dir):
		for newdir in os.listdir(dir):
			newFile = os.path.join(dir, newdir)
			if os.path.isdir(newFile):
				self.countFiles(newFile)
			else:
				self.counter += 1
		return self.counter

	def handleOption(self, event):

		# Update Views
		option = self.getSelection()
		if self.optionOld != option:
			if option == 1:
				# Preset
				print("VBR Preset selected")
				self.enablePresets()
				self.disableVBRQuality()
				self.disableBitrate()
			elif option == 2:
				# VBR Quality
				print("VBR Quality selected")
				self.enableVBRQuality()
				self.disableBitrate()
				self.disablePresets()
			elif option == 3:
				# Bitrate
				print("Bitrate selected")
				self.enableBitrate()
				self.disablePresets()
				self.disableVBRQuality()
				#time.sleep(0.1)
			else:
				print("Something went wrong")
			#self.root.update()
			self.optionOld = option
		else:
			pass

	def startEncoder(self):
		option = self.getSelection()
		if option == 1:
			# Presets

			vbrQuality = None
			bitrate = None
			fastMode = True
			threads = 4
			print(self.qualityPreset.get())

			preset = self.presets[self.qualityPreset.get()]['text']
			print("Preset = ", preset)
			self._start(_preset=preset, _bitrate=bitrate, _fastMode=fastMode, _vbrQuality=vbrQuality, _threads=threads)

		elif option == 2:
			# VBR Quality

			preset = None
			bitrate = None
			fastMode = True
			threads = 4
			vbrQuality = int(self.vbrQuality.get())

			self._start(_preset=preset, _bitrate=bitrate, _fastMode=fastMode, _vbrQuality=vbrQuality, _threads=threads)
		else:
			# Option 3
			vbrQuality = None
			preset = None
			fastMode = True
			threads = 4
			print("Bitrate:", self.bitrate.get())
			bitrate = int(self.bitrate.get())

			self._start(_preset=preset, _bitrate=bitrate, _fastMode=fastMode, _vbrQuality=vbrQuality, _threads=threads)

	def _start(self, _preset=None, _bitrate=None, _fastMode=True, _vbrQuality=None, _threads=4):
		self.d = Encoder(encodePath=self._input, outputPath=self._output, preset=_preset, bitrate=_bitrate, fastMode=_fastMode, vbr=_vbrQuality, threads=_threads, guiMode=True)
		self.d.startEncoder()
		self.encodeFinished = True
		self.encodeButton.config(state=NORMAL)
		#self._showFinished()

	# This method is not thread-safe on Windows;
	# the GUI will freeze
	def _showFinished(self):
		tkMessageBox.showinfo("MP3 Encoder", "Finished Encoding in " + str(self.d.getDifTime()) + " Seconds")
		
	def startHandler(self):
		while 1:
			time.sleep(0.5)
			self.root.event_generate('<<Handler>>', when='tail')
	def runGui(self):
		guiThread = threading.Thread(target=self.root.mainloop)  # pass the method itself; calling it here would block
		#guiThread.setDaemon(1)
		guiThread.start()

	def getSelection(self):
		return self.options.get()
	def updateProgressBar(self):

		filesDoneOld = 0

		while not self.encodeFinished:
			time.sleep(0.1)
			if self.d is not None:
				filesDone = self.d.getFilesDone()
				print("Files done: ", filesDone)
				self.progressLabel.config(text=(str(filesDone) + " of " + str(self.progressBarLength) + " files encoded") + " | " + "Time: " + str(self.d.getDifTime()) + " secs")
				change = filesDone - filesDoneOld
				if change != 0:
					self.progressbar.step(amount=change)
					# print("Change Progress: ", change)
					filesDoneOld = filesDone
				else:
					pass

			else:
				self.progressLabel.config(text="Encoder object is None, something unexpected happened, please contact...")
		currentValue = int(self.progressbar["value"])
		if currentValue != 0:
			self.progressbar.step(self.progressBarLength-currentValue)

		self.progressLabel.config(text="Finished" + " | " + "Time: " + str(self.d.getDifTime()) + " secs")
		print()
		print("Progressbar thread terminated")
		print()
	def callback(self):

		if self._output is None or self._input is None:
			print("Please select an input or output directory")
			self.showFileInputError()
		else:
			if self._output == "" or self._input == "":
				print("Please select a valid input or output directory")
				self.showFileInputError()
			else:

				self.encodeButton.config(state=DISABLED)
				self.encodeFinished = False

				print("Start Encoding")
				self.counter = 0
				self.progressBarLength = self.countFiles(self._input)

				print("Setting progressbar max to amount of files")
				self.progressbar.config(maximum=self.progressBarLength)
				print("Encoder is running now")

				progressBarThread = threading.Thread(target=self.updateProgressBar)
				progressBarThread.setDaemon(1)
				progressBarThread.start()

				EncoderThread = threading.Thread(target=self.startEncoder)
				EncoderThread.setDaemon(1)
				EncoderThread.start()
				print("Thread started")

	#########################################
	# Disable
	#########################################

	def disablePresets(self):
		print(len(self.presets))
		for pre in self.presets:
			pre.config(state = DISABLED)
			pre.config(bg = 'white')
	def disableBitrate(self):
		self.bitrate.config(state = DISABLED)
		self.bitrate.config(bg = 'gray' )
	def disableVBRQuality(self):
		self.vbrQuality.config(state = DISABLED)
		self.vbrQuality.config(bg = 'gray' )

	#########################################
	# Enable
	#########################################
	def enablePresets(self):
		for pre in self.presets:
			pre.config(state = NORMAL)
			pre.config(bg = 'yellow')
	def enableVBRQuality(self):
		self.vbrQuality.config(state = NORMAL)
		self.vbrQuality.config(bg = 'yellow' )
	def enableBitrate(self):
		self.bitrate.config(state = NORMAL)
		self.bitrate.config(bg = 'yellow' )
Example #14
	def _start(self, _preset=None, _bitrate=None, _fastMode=True, _vbrQuality=None, _threads=4):
		self.d = Encoder(encodePath=self._input, outputPath=self._output, preset=_preset, bitrate=_bitrate, fastMode=_fastMode, vbr=_vbrQuality, threads=_threads, guiMode=True)
		self.d.startEncoder()
		self.encodeFinished = True
		self.encodeButton.config(state=NORMAL)
Example #15
class Mem2SeqRunner(ExperimentRunnerBase):
    def __init__(self, args):
        super(Mem2SeqRunner, self).__init__(args)

        # Model parameters
        self.gru_size = 128
        self.emb_size = 128
        #TODO: Try hops 4 with task 3
        self.hops = 3
        self.dropout = 0.2

        self.encoder = Encoder(self.hops, self.nwords, self.gru_size)
        self.decoder = Decoder(self.emb_size, self.hops, self.gru_size,
                               self.nwords)
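        # the encoder embeds the dialog-history memory over `self.hops` hops; the decoder
        # mixes a vocab distribution with a pointer over that memory (p_vocab / p_ptr below)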

        self.optim_enc = torch.optim.Adam(self.encoder.parameters(), lr=0.001)
        self.optim_dec = torch.optim.Adam(self.decoder.parameters(), lr=0.001)
        if self.loss_weighting:
            self.optim_loss_weights = torch.optim.Adam([self.loss_weights],
                                                       lr=0.0001)
        self.scheduler = lr_scheduler.ReduceLROnPlateau(self.optim_dec,
                                                        mode='max',
                                                        factor=0.5,
                                                        patience=1,
                                                        min_lr=0.0001,
                                                        verbose=True)

        if self.use_cuda:
            self.cross_entropy = self.cross_entropy.cuda()
            self.encoder = self.encoder.cuda()
            self.decoder = self.decoder.cuda()
            if self.loss_weighting:
                self.loss_weights = self.loss_weights.cuda()

    def train_batch_wrapper(self, batch, new_epoch, clip_grads):
        context = batch[0].transpose(0, 1)
        responses = batch[1].transpose(0, 1)
        index = batch[2].transpose(0, 1)
        sentinel = batch[3].transpose(0, 1)
        context_lengths = batch[4]
        target_lengths = batch[5]
        return self.train_batch(context, responses, index, sentinel, new_epoch,
                                context_lengths, target_lengths, clip_grads)

    def train_batch(self, context, responses, index, sentinel, new_epoch,
                    context_lengths, target_lengths, clip_grads):

        # (TODO): remove transpose
        if new_epoch:  # (TODO): Change this part
            self.loss = 0
            self.ploss = 0
            self.vloss = 0
            self.n = 1

        context = context.type(self.TYPE)
        responses = responses.type(self.TYPE)
        index = index.type(self.TYPE)
        sentinel = sentinel.type(self.TYPE)

        self.optim_enc.zero_grad()
        self.optim_dec.zero_grad()
        if self.loss_weighting:
            self.optim_loss_weights.zero_grad()

        h = self.encoder(context.transpose(0, 1))
        self.decoder.load_memory(context.transpose(0, 1))
        y = torch.from_numpy(np.array([2] * context.size(1),
                                      dtype=int)).type(self.TYPE)
        y_len = 0

        h = h.unsqueeze(0)
        output_vocab = torch.zeros(max(target_lengths), context.size(1),
                                   self.nwords)
        output_ptr = torch.zeros(max(target_lengths), context.size(1),
                                 context.size(0))
        if self.use_cuda:
            output_vocab = output_vocab.cuda()
            output_ptr = output_ptr.cuda()
        while y_len < responses.size(0):  # TODO: Add EOS condition
            p_ptr, p_vocab, h = self.decoder(context, y, h)
            output_vocab[y_len] = p_vocab
            output_ptr[y_len] = p_ptr
            # TODO: Add teacher forcing ratio
            y = responses[y_len].type(self.TYPE)
            y_len += 1

        # print(loss)
        mask_v = torch.ones(output_vocab.size())
        mask_p = torch.ones(output_ptr.size())
        if self.use_cuda:
            mask_p = mask_p.cuda()
            mask_v = mask_v.cuda()
        for i in range(responses.size(1)):
            mask_v[target_lengths[i]:, i, :] = 0
            mask_p[target_lengths[i]:, i, :] = 0

        loss_v = self.cross_entropy(
            output_vocab.contiguous().view(-1, self.nwords),
            responses.contiguous().view(-1))

        loss_ptr = self.cross_entropy(
            output_ptr.contiguous().view(-1, context.size(0)),
            index.contiguous().view(-1))
        if self.loss_weighting:
            loss = loss_ptr/(2*self.loss_weights[0]*self.loss_weights[0]) + loss_v/(2*self.loss_weights[1]*self.loss_weights[1]) + \
               torch.log(self.loss_weights[0] * self.loss_weights[1])
            loss_ptr = loss_ptr / (2 * self.loss_weights[0] *
                                   self.loss_weights[0])
            loss_v = loss_v / (2 * self.loss_weights[1] * self.loss_weights[1])
        else:
            loss = loss_ptr + loss_v
        loss.backward()
        ec = torch.nn.utils.clip_grad_norm_(self.encoder.parameters(), 10.0)
        dc = torch.nn.utils.clip_grad_norm_(self.decoder.parameters(), 10.0)
        self.optim_enc.step()
        self.optim_dec.step()
        if self.loss_weighting:
            self.optim_loss_weights.step()

        self.loss += loss.item()
        self.vloss += loss_v.item()
        self.ploss += loss_ptr.item()

        return loss.item(), loss_v.item(), loss_ptr.item()

    def evaluate_batch(self,
                       batch_size,
                       input_batches,
                       input_lengths,
                       target_batches,
                       target_lengths,
                       target_index,
                       target_gate,
                       src_plain,
                       profile_memory=None):

        # Set to not-training mode to disable dropout
        self.encoder.train(False)
        self.decoder.train(False)
        # Run words through encoder
        decoder_hidden = self.encoder(input_batches.transpose(0,
                                                              1)).unsqueeze(0)
        self.decoder.load_memory(input_batches.transpose(0, 1))

        # Prepare input and output variables
        decoder_input = Variable(torch.LongTensor([2] * batch_size))

        decoded_words = []
        all_decoder_outputs_vocab = Variable(
            torch.zeros(max(target_lengths), batch_size, self.nwords))
        all_decoder_outputs_ptr = Variable(
            torch.zeros(max(target_lengths), batch_size,
                        input_batches.size(0)))
        # all_decoder_outputs_gate = Variable(torch.zeros(self.max_r, batch_size))
        # Move new Variables to CUDA

        if self.use_cuda:
            all_decoder_outputs_vocab = all_decoder_outputs_vocab.cuda()
            all_decoder_outputs_ptr = all_decoder_outputs_ptr.cuda()
            # all_decoder_outputs_gate = all_decoder_outputs_gate.cuda()
            decoder_input = decoder_input.cuda()

        p = []
        for elm in src_plain:
            elm_temp = [word_triple[0] for word_triple in elm]
            p.append(elm_temp)

        self.from_whichs = []
        acc_gate, acc_ptr, acc_vac = 0.0, 0.0, 0.0
        # Run through decoder one time step at a time
        for t in range(max(target_lengths)):
            decoder_ptr, decoder_vacab, decoder_hidden = self.decoder(
                input_batches, decoder_input, decoder_hidden)
            all_decoder_outputs_vocab[t] = decoder_vacab
            topv, topvi = decoder_vacab.data.topk(1)
            all_decoder_outputs_ptr[t] = decoder_ptr
            topp, toppi = decoder_ptr.data.topk(1)
            top_ptr_i = torch.gather(input_batches[:, :, 0], 0,
                                     Variable(toppi.view(1,
                                                         -1))).transpose(0, 1)
            next_in = [
                top_ptr_i[i].item() if
                (toppi[i].item() < input_lengths[i] - 1) else topvi[i].item()
                for i in range(batch_size)
            ]
            # if next_in in self.kb_entry.keys():
            #     ptr_distr.append([next_in, decoder_vacab.data])

            decoder_input = Variable(
                torch.LongTensor(next_in))  # Chosen word is next input
            if self.use_cuda: decoder_input = decoder_input.cuda()

            temp = []
            from_which = []
            for i in range(batch_size):
                if (toppi[i].item() < len(p[i]) - 1):
                    temp.append(p[i][toppi[i].item()])
                    from_which.append('p')
                else:
                    if target_index[t][i] != toppi[i].item():
                        self.incorrect_sentinel += 1
                    ind = topvi[i].item()
                    if ind == 3:
                        temp.append('<eos>')
                    else:
                        temp.append(self.i2w[ind])
                    from_which.append('v')
            decoded_words.append(temp)
            self.from_whichs.append(from_which)
        self.from_whichs = np.array(self.from_whichs)

        loss_v = self.cross_entropy(
            all_decoder_outputs_vocab.contiguous().view(-1, self.nwords),
            target_batches.contiguous().view(-1))
        loss_ptr = self.cross_entropy(
            all_decoder_outputs_ptr.contiguous().view(-1,
                                                      input_batches.size(0)),
            target_index.contiguous().view(-1))

        if self.loss_weighting:
            loss = loss_ptr/(2*self.loss_weights[0]*self.loss_weights[0]) + loss_v/(2*self.loss_weights[1]*self.loss_weights[1]) + \
               torch.log(self.loss_weights[0] * self.loss_weights[1])
        else:
            loss = loss_ptr + loss_v

        self.loss += loss.item()
        self.vloss += loss_v.item()
        self.ploss += loss_ptr.item()
        self.n += 1

        # Set back to training mode
        self.encoder.train(True)
        self.decoder.train(True)
        return decoded_words, self.from_whichs  # , acc_ptr, acc_vac

    def save_models(self, path):
        torch.save(self.encoder.state_dict(),
                   os.path.join(path, 'encoder.pth'))
        torch.save(self.decoder.state_dict(),
                   os.path.join(path, 'decoder.pth'))

    def load_models(self, path: str = '.'):
        self.encoder.load_state_dict(
            torch.load(os.path.join(path, 'encoder.pth')))
        self.decoder.load_state_dict(
            torch.load(os.path.join(path, 'decoder.pth')))
Example #16
#
#
# author: Jack Burns
# create date: 11/13/2017
# version 1.0
import sys

import networkx
from pydna.assembly import Assembly
from Encoder import Encoder
from pydna.dseqrecord import Dseqrecord
from pydna.amplify import Anneal
import matplotlib.pyplot as plt
import networkx as nx

enc = Encoder()
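# assumption: this module-level Encoder maps data to DNA sequences (suggested by the pydna imports)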


def main():
    if len(sys.argv) < 4:
        print("Input is not valid")
        sys.exit(1)
    else:
        file_prefix = 'Networks/'
        if file_prefix not in str(sys.argv[1]):
            file_path = 'Networks/' + str(sys.argv[1])
        else:
            file_path = str(sys.argv[1])
        try:
            graph = networkx.read_edgelist(file_path,
                                           create_using=networkx.DiGraph(),
Example #17
expected_encoder_output = [184, 108, 36, 108, 253, 68, 204, 119, 243, 141, 170, 56, 101, 97, 252, 79, 95, 236, 207, 191, 158, 89, 82, 151, 141, 255, 100, 112, 233, 220, 20, 146, 16, 108, 24, 117, 178, 175, 39, 210, 134, 224, 220, 75, 231, 4, 182, 189, 29, 59, 129, 105, 60, 64, 207, 253, 161, 41, 146, 10, 249, 210, 175, 121, 121, 37, 46, 239, 208, 18, 42, 101, 61, 67, 136, 166, 62, 192, 44, 43, 240, 97, 196, 228, 91, 94, 242, 9, 130, 218, 243, 208, 16, 248, 57, 194]

if __name__ == '__main__':
    from Encoder import Encoder
    from Decoder import Decoder
    from SymbolMapper import SymbolMapper
    import random
    import math
    
    mapper = SymbolMapper(c, precision)
    map_func = lambda value: mapper.map(value)
    
    # Instantiate an encoder
    print('Message string: "%s"' % message)
    print('Message hex:', message.encode().hex())
    encoder = Encoder(k, map_func, message)
    
    # spine length
    n = 8 * len(message)
    spine_length = (n + (k - 1)) // k
    
    # encode a short message, making 3 passes
    print("Producing 3 passes.")
    symbols = [encoder.get_symbol(i) for i in list(range(spine_length)) * 3]
    print("symbols: ", symbols)
    
    # make sure we got the expected result
    assert expected_encoder_output == symbols
    
    # get average signal power
    signal_power = mapper.get_signal_average_power()
Example #18
data_object = ModelData(datapath, batch_size=64)
data_train, data_test = data_object.train_examples, data_object.test_examples

# parameters for defining encoder and decoder
INPUT_DIM, OUTPUT_DIM = data_object.input_features, data_object.output_features
ENC_HID_DIM = 512
DEC_HID_DIM = 512
N_LAYERS = 2
ENC_DROPOUT = 0.5
DEC_DROPOUT = 0.5
DEVICE = 'cpu'

# initialize encoder, decoder and seq2seq model classes
enc = Encoder(INPUT_DIM,
              ENC_HID_DIM,
              N_LAYERS,
              ENC_DROPOUT,
              is_bidirectional=False)
attn = CosAtt(enc, DEC_HID_DIM, N_LAYERS)
dec = Decoder(OUTPUT_DIM,
              DEC_HID_DIM,
              N_LAYERS,
              DEC_DROPOUT,
              enc,
              attention=attn)
model = Seq2Seq(enc, dec, DEVICE, attention=attn)


# initialize values of learnable parameters
def init_weights(m):
    for name, param in m.named_parameters():
Example #19
class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config

        # define the embedding layer
        self.embedding = Embedding(config.num_vocab,  # vocabulary size
                                   config.embedding_size,  # embedding dimension
                                   config.pad_id,
                                   config.dropout)

        self.affect_embedding = Embedding(config.num_vocab,
                                          config.affect_embedding_size,
                                          config.pad_id,
                                          config.dropout)
        self.affect_embedding.embedding.weight.requires_grad = False

        # encoder
        self.encoder = Encoder(config.encoder_decoder_cell_type,  # RNN type
                               config.embedding_size+config.affect_embedding_size,  # input dimension
                               config.encoder_decoder_output_size,  # output dimension
                               config.encoder_decoder_num_layers,  # number of RNN layers
                               config.encoder_bidirectional,  # whether bidirectional
                               config.dropout)  # dropout probability

        # decoder
        self.decoder = Decoder(config.encoder_decoder_cell_type,  # RNN type
                               config.embedding_size+config.affect_embedding_size+config.encoder_decoder_output_size,
                               config.encoder_decoder_output_size,  # output dimension
                               config.encoder_decoder_num_layers,  # number of RNN layers
                               config.dropout)  # dropout probability

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(config.encoder_decoder_output_size, config.num_vocab),
            nn.Softmax(-1)
        )

    def forward(self, inputs, inference=False, max_len=60, gpu=True):
        if not inference:  # training
            id_posts = inputs['posts']  # [batch, seq]
            len_posts = inputs['len_posts']  # [batch]
            id_responses = inputs['responses']  # [batch, seq]
            len_decoder = id_responses.size(1) - 1

            embed_posts = torch.cat([self.embedding(id_posts), self.affect_embedding(id_posts)], 2)
            embed_responses = torch.cat([self.embedding(id_responses), self.affect_embedding(id_responses)], 2)

            # encoder_output: [seq, batch, dim]
            # state: [layers, batch, dim]
            _, state_encoder = self.encoder(embed_posts.transpose(0, 1), len_posts)
            if isinstance(state_encoder, tuple):  # an LSTM returns (h, c)
                context = state_encoder[0]
            else:
                context = state_encoder
            context = context[-1, :, :].unsqueeze(0)  # [1, batch, dim]

            # the decoder input is the response with end_id stripped
            decoder_inputs = embed_responses[:, :-1, :].transpose(0, 1)  # [seq-1, batch, embed_size]
            decoder_inputs = decoder_inputs.split([1] * len_decoder, 0)

            outputs = []
            for idx in range(len_decoder):
                if idx == 0:
                    state = state_encoder  # initial decoder state
                    decoder_input = torch.cat([decoder_inputs[idx], context], 2)
                else:
                    decoder_input = torch.cat([decoder_inputs[idx], context], 2)
                # output: [1, batch, dim_out]
                # state: [num_layer, batch, dim_out]
                output, state = self.decoder(decoder_input, state)
                # attn: [batch, 1, attention_size]
                outputs.append(output)

            outputs = torch.cat(outputs, 0).transpose(0, 1)  # [batch, seq-1, dim_out]
            output_vocab = self.projector(outputs)  # [batch, seq-1, num_vocab]

            return output_vocab
        else:  # inference
            id_posts = inputs['posts']  # [batch, seq]
            len_posts = inputs['len_posts']  # [batch]
            batch_size = id_posts.size(0)

            embed_posts = torch.cat([self.embedding(id_posts), self.affect_embedding(id_posts)], 2)

            # state: [layers, batch, dim]
            _, state_encoder = self.encoder(embed_posts.transpose(0, 1), len_posts)
            if isinstance(state_encoder, tuple):  # an LSTM returns (h, c)
                context = state_encoder[0]
            else:
                context = state_encoder
            context = context[-1, :, :].unsqueeze(0)  # [1, batch, dim]

            done = torch.tensor([0] * batch_size).bool()
            first_input_id = (torch.ones((1, batch_size)) * self.config.start_id).long()
            if gpu:
                done = done.cuda()
                first_input_id = first_input_id.cuda()

            outputs = []
            for idx in range(max_len):
                if idx == 0:  # first time step
                    state = state_encoder  # initial decoder state
                    decoder_input = torch.cat([self.embedding(first_input_id), self.affect_embedding(first_input_id),
                                               context], 2)
                else:
                    decoder_input = torch.cat([self.embedding(next_input_id), self.affect_embedding(next_input_id),
                                               context], 2)

                # output: [1, batch, dim_out]
                # state: [num_layers, batch, dim_out]
                output, state = self.decoder(decoder_input, state)
                outputs.append(output)

                vocab_prob = self.projector(output)  # [1, batch, num_vocab]
                next_input_id = torch.argmax(vocab_prob, 2)  # feed the most likely word to the next step [1, batch]
                _done = next_input_id.squeeze(0) == self.config.end_id  # sequences that just emitted end_id [batch]
                done = done | _done  # all sequences finished so far
                if done.sum() == batch_size:  # stop early once the whole batch is done
                    break

            outputs = torch.cat(outputs, 0).transpose(0, 1)  # [batch, seq, dim_out]
            output_vocab = self.projector(outputs)  # [batch, seq, num_vocab]

            return output_vocab

    # parameter statistics
    def print_parameters(self):
        r""" Count the parameters """
        total_num = 0  # total number of parameters
        for param in self.parameters():
            num = 1
            if param.requires_grad:
                size = param.size()
                for dim in size:
                    num *= dim
            total_num += num
        print(f"Total parameters: {total_num}")

    def save_model(self, epoch, global_step, path):
        r""" Save the model """
        torch.save({'embedding': self.embedding.state_dict(),
                    'affect_embedding': self.affect_embedding.state_dict(),
                    'encoder': self.encoder.state_dict(),
                    'decoder': self.decoder.state_dict(),
                    'projector': self.projector.state_dict(),
                    'epoch': epoch,
                    'global_step': global_step}, path)

    def load_model(self, path):
        r""" Load the model """
        checkpoint = torch.load(path)
        self.embedding.load_state_dict(checkpoint['embedding'])
        self.affect_embedding.load_state_dict(checkpoint['affect_embedding'])
        self.encoder.load_state_dict(checkpoint['encoder'])
        self.decoder.load_state_dict(checkpoint['decoder'])
        self.projector.load_state_dict(checkpoint['projector'])
        epoch = checkpoint['epoch']
        global_step = checkpoint['global_step']
        return epoch, global_step
Example #20
# params defined
BUFFER_SIZE = len(input_tensor_train)
BATCH_SIZE = 64
steps_per_epoch = len(input_tensor_train)//BATCH_SIZE
embedding_dim = 256
units = 1024
vocab_inp_size = len(inp_lang.word_index)+1
vocab_tar_size = len(targ_lang.word_index)+1

dataset = tf.data.Dataset.from_tensor_slices(
    (input_tensor_train, target_tensor_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)

# encoder class object
encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)

# decoder class object
decoder = Decoder(vocab_tar_size, embedding_dim, units, BATCH_SIZE)

# optimizer
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(
    from_logits=True, reduction='none')

# checkpoints
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")

Example #21
    del info
    # add a flag column so the sets can be separated again later
    df_train_data['train_or_test'] = '1'
    df_test_data['train_or_test'] = '2'
    # merge into one combined set so encoding is applied uniformly
    df_all = df_train_data.append(df_test_data, ignore_index=True)

# Data preprocessing:
    # initial cleaning
    pro = DataProcess()
    df_all = pro.prd(pro.removingInvalid(df_all))
    del pro
    print("Initial data processing done!")

    # data encoding
    enc = Encoder()
    df_all = enc.encoder(enc.prepareEncoder(df_all))
    need_one_hot = enc.get_need_one_hot()
    del enc
    print("Data encoding done!")

    # re-split into train and test sets
    all_list = SplitTrOrTe().split(df_all, need_one_hot)
    x_train, y_train, x_test, y_test, test_user_id = all_list[0], all_list[1], all_list[2], all_list[3], all_list[4]
    del all_list
    print("Train/test re-split done!")

# Core algorithm:
    # oversampling
    x_train, y_train = OverSample().over_sample(x_train, y_train)
    print("Oversampling done!")
Example #22
    print('Loading data...')
    
    # data loading
    data = Data_Preprocess(args.dataset, min_length=args.min_length, max_length=args.max_length, img=args.img)
    personas = len(data.people) + 1

    print("Number of training Samples    :", len(data.x_train))
    #print("sample:",data.x_train[0])
    #print("sample:",data.train_lengths[0])
    print("Number of validation Samples  :", len(data.x_val))
    print("Number of test Samples  :", len(data.x_test))
    print("Number of Personas            :", personas)
    print("Number of words            :", len(data.word2index)) 

    embedding = (len(data.word2index),128)
    encoder = Encoder(args.hidden_size, embedding, num_layers=args.num_layers, batch_size=args.batch_size, ELMo_embedding=False, train_embedding=True)

    generator = Generator(args.hidden_size, embedding, num_layers=args.num_layers,ELMo_embedding=False, train_embedding=True, dropout_p=args.dropout)

    sen_embedding = Sentence_embedding(args.dataset)

    if use_cuda:
        encoder = encoder.cuda()
        generator = generator.cuda()
        #sen_embedding = sen_embedding.cuda()
        pass
    """
    sens = sen_embedding.get_sen_twi('01USA18','pos')
    #print(sens) #打印句子
    #print(sen_embedding.miss_people[0]) #打印无情感向量名单
    s_embedding = sen_embedding.get_senti_embedding(sens)
Example #23
from Classifier import Classifier
from Encoder import Encoder
from ModelBlock import ModelBlock

encoder = Encoder()
classifier = Classifier(input_dim=(7,7,1024))

# attach the classifier as a prediction head on top of the encoder backbone
model = ModelBlock.add_head(encoder, [classifier])

print(model.summary())
Example #24
    e = 0.5
    path = "./sort_I/sortI_BEC_" + str(e) + "_" + str(N) + ".dat"
    # path ="./polarcode/"+"sort_I_" + str(M) + "_" + str(P) + "_" + "20" + ".dat"

    if len(sys.argv) == 2:
        if sys.argv[1] == "ber":
            errorcount = 0
            frameerrorcount = 0
            kaisu = 100  # number of trials

            start = time.time()
            for i in range(kaisu):
                message = Message(K)
                message.MakeMessage()

                encoder0 = Encoder(K, N, message.message, path, False)
                encoder0.MakeCodeworde()

                bec = BEC(e)
                bec.input = encoder0.codeword
                bec.Transmission()
                output = bec.output

                decoder0 = Decoder(K, N, output, "BEC", path, False)
                decoder0.DecodeMessage(e)

                error = np.bitwise_xor(message.message, decoder0.hat_message)
                errorcount += np.count_nonzero(error)

                frameerrorcount += 0 if np.count_nonzero(error) == 0 else 1
                print(i, "/", kaisu, "runs, ",
Example #25
    def runPacket(self, channelProcess, mapper, channelCof):
        # add the puncturing
        # some constants:
        experimentSpec = self.experimentSpec
        ############################################################
        # set parameters
        packetLen = experimentSpec['packetLen']
        adaptable = experimentSpec['adaptable']
        # codeRate=1/R R=messageBits/sentBits
        k = experimentSpec['spinal']['k']
        B = experimentSpec['spinal']['B']
        d = experimentSpec['spinal']['d']
        c = experimentSpec['map']['bitsPerSymbol']
        pun = experimentSpec['spinal']['punctureRate']
        tail = experimentSpec['spinal']['numLastCodeStep']
        # code rate calculation
        codeRate = experimentSpec['spinal']['codeRate']
        k = experimentSpec['spinal']['k']
        passNum = int(k * pun / codeRate / c)
        # print passNum
        SNR_dB = experimentSpec['channel']['SNR_dB']
        estDelay = experimentSpec['estimation']['estChannelDelay']
        ############################################################
        addEqu = 1
        mu = experimentSpec['adaptMu']
        iteration = experimentSpec['equaIteration']
        # Message to be encoded:
        message = ''.join(random.choice(string.ascii_uppercase + string.digits)
                          for _ in range((packetLen + 7) // 8))
        
        # initialize random number generator.
        # this seed is fixed constant in order
        # to get deterministic results in this example.
        # channelLen = len(channelCof)

        # channel_power = 0
        # for i in range(channelLen):
        # channel_power += abs(channelCof[i])**2
        # channel_dev = math.sqrt(channel_power)
        # channelCof = [ch/channel_dev for ch in channelCof]
        map_func = lambda value: mapper.map(value)

        # Instantiate an encoder
        encoder = Encoder(k, map_func, message, packetLen)

        # spine length
        spine_length = (packetLen + (k - 1)) // k
        lt = (spine_length - 1) % pun
        # encode  message, add the tail
        acc = 0
        symbols_C = []
        for i in range(spine_length):
            if i % pun == lt:
                # print i,passNum
                acc += 1
                # make up the addition of tail
                if acc <= 2*tail and acc % 2 == 0:
                    passNumTemp = passNum - 1
                else:
                    passNumTemp = passNum
                for p in range(passNumTemp):
                    symbols_C.append(encoder.get_symbol(i))
        for t in range(tail):
            symbols_C.append(encoder.get_symbol(spine_length - 1))
        # make sure we got the expected result
        channelLen = len(channelCof)
        # pdb.set_trace()
        noisy_symbols_C, realDelay = channelProcess(
            symbols_C, SNR_dB)
        # instantiate decoder
        if experimentSpec['estimation']['addEstimate']:
            delay = estDelay
        else:
            delay = realDelay
        # print delay, realDelay
        # print channelCof
        channelIn = channelCof[:]
        decoder = Decoder(k, B, d, channelLen, delay, passNum, map_func, adaptable, channelIn)
        # update decoder with gathered points
        acc = 0
        codeAcc = 0
        for i in range(spine_length):
            symbols_in = []
            if i % pun == lt:
                acc += 1
                if acc <= 2*tail and acc % 2 == 0:
                    passNumTemp = passNum - 1
                else:
                    passNumTemp = passNum
                for ii in range(passNumTemp):
                    symbols_in.append(noisy_symbols_C[codeAcc + ii])
                codeAcc += passNumTemp
            if i == spine_length - 1:
                for ii in range(tail):
                    symbols_in.append(noisy_symbols_C[codeAcc + ii])
                # print symbols_in
            if addEqu:
                decoder.advance_fading(symbols_in, mu)
            else:
                decoder.advance(symbols_in)
        # pdb.set_trace()
        results, newchannel = decoder.get_most_likely(noisy_symbols_C, iteration, mu)
        error, totalBits = self._errorStatistics(results, message, packetLen)
        # keep the channel estimate refined by the decoder
        channelCof = newchannel
        print error, totalBits
        return error, totalBits, channelCof
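The pass-count arithmetic above is easy to sanity-check in isolation. A minimal sketch, with every value below chosen purely for illustration rather than taken from an experiment spec:

# Worked example of passNum = int(k * pun / codeRate / c)
k = 4            # message bits consumed per spine step
pun = 2          # one symbol batch every 'pun' spine steps
codeRate = 0.5   # messageBits / sentBits
c = 2            # bits per channel symbol

passNum = int(k * pun / codeRate / c)
print(passNum)  # 8 symbol passes per punctured spine position

With these numbers, every pun = 2 spine steps carry k * pun = 8 message bits and transmit passNum * c = 16 coded bits, matching the 0.5 code rate.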
Example #26
0
cnn.add(LeakyReLU(alpha=0.2))
# cnn.add(ReLU())
cnn.add(Dropout(0.2))

cnn.add(Conv2D(128, (3, 3), strides=(2, 2), padding='same'))
# cnn.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2), padding='same'))
cnn.add(BatchNormalization(momentum=0.9))
cnn.add(LeakyReLU(alpha=0.2))
# cnn.add(ReLU())
cnn.add(Dropout(0.2))

cnn.add(Flatten())

transform = Sequential()
transform.add(TimeDistributed(cnn))
transform.add(Encoder(2, 1024, 2, 0.2))
transform.add(
    Dense(1, activation="sigmoid", kernel_regularizer=regularizers.l2(0.01)))
transform.compile(loss='binary_crossentropy',
                  optimizer=Adam(lr=0.0002, beta_1=0.5),
                  metrics=['accuracy'])
transform.build(input_shape=input_shape)
transform.summary()

print("Start training...")
epoch = 0
BATCH_NUM = int(len(train_data) / BATCH_SIZE) + 1

for i in range(ITERATIONS):

    ix = np.random.randint(0, len(train_data), BATCH_SIZE)
Example #27
0
class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()

        self.config = config

        # affect embedding layer
        self.affect_embedding = AffectEmbedding(config.num_vocab,
                                                config.affect_embedding_size,
                                                config.pad_id)

        # word embedding layer
        self.embedding = WordEmbedding(
            config.num_vocab,  # vocabulary size
            config.embedding_size,  # embedding dimension
            config.pad_id)  # pad_id

        # post encoder
        self.post_encoder = Encoder(
            config.post_encoder_cell_type,  # RNN type
            config.embedding_size,  # input size
            config.post_encoder_output_size,  # output size
            config.post_encoder_num_layers,  # number of RNN layers
            config.post_encoder_bidirectional,  # bidirectional or not
            config.dropout)  # dropout probability

        # response encoder
        self.response_encoder = Encoder(
            config.response_encoder_cell_type,
            config.embedding_size,  # input size
            config.response_encoder_output_size,  # output size
            config.response_encoder_num_layers,  # number of RNN layers
            config.response_encoder_bidirectional,  # bidirectional or not
            config.dropout)  # dropout probability

        # prior network
        self.prior_net = PriorNet(
            config.post_encoder_output_size,  # post input size
            config.latent_size,  # latent variable size
            config.dims_prior)  # hidden layer sizes

        # recognition network
        self.recognize_net = RecognizeNet(
            config.post_encoder_output_size,  # post input size
            config.response_encoder_output_size,  # response input size
            config.latent_size,  # latent variable size
            config.dims_recognize)  # hidden layer sizes

        # prepare the initial decoder state
        self.prepare_state = PrepareState(
            config.post_encoder_output_size + config.latent_size,
            config.decoder_cell_type, config.decoder_output_size,
            config.decoder_num_layers)

        # decoder
        self.decoder = Decoder(
            config.decoder_cell_type,  # RNN type
            config.embedding_size,  # input size
            config.decoder_output_size,  # output size
            config.decoder_num_layers,  # number of RNN layers
            config.dropout)  # dropout probability

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(config.decoder_output_size, config.num_vocab),
            nn.Softmax(-1))

    def forward(
            self,
            input,
            inference=False,  # whether we are in inference mode
            use_true=False,
            max_len=60):  # maximum decoding length

        if not inference:  # training
            if use_true:
                id_posts = input['posts']  # [batch, seq]
                len_posts = input['len_posts']  # [batch]
                id_responses = input['responses']  # [batch, seq]
                len_responses = input['len_responses']  # [batch]
                sampled_latents = input[
                    'sampled_latents']  # [batch, latent_size]

                embed_posts = self.embedding(
                    id_posts)  # [batch, seq, embed_size]
                embed_responses = self.embedding(
                    id_responses)  # [batch, seq, embed_size]

                # the decoder input is the response with end_id dropped
                decoder_input = embed_responses[:, :-1, :].transpose(
                    0, 1)  # [seq-1, batch, embed_size]
                len_decoder = decoder_input.size()[0]  # decoding length: seq-1
                decoder_input = decoder_input.split(
                    [1] * len_decoder,
                    0)  # per-step inputs: seq-1 tensors of [1, batch, embed_size]

                # state = [layers, batch, dim]
                _, state_posts = self.post_encoder(embed_posts.transpose(0, 1),
                                                   len_posts)
                _, state_responses = self.response_encoder(
                    embed_responses.transpose(0, 1), len_responses)
                if isinstance(state_posts, tuple):
                    state_posts = state_posts[0]
                if isinstance(state_responses, tuple):
                    state_responses = state_responses[0]
                x = state_posts[-1, :, :]  # [batch, dim]
                y = state_responses[-1, :, :]  # [batch, dim]

                _mu, _logvar = self.prior_net(x)  # [batch, latent]
                mu, logvar = self.recognize_net(x, y)  # [batch, latent]
                z = mu + (0.5 *
                          logvar).exp() * sampled_latents  # [batch, latent]

                first_state = self.prepare_state(torch.cat(
                    [z, x], 1))  # [num_layer, batch, dim_out]
                outputs = []

                for idx in range(len_decoder):
                    if idx == 0:
                        state = first_state  # initial decoder state
                    input = decoder_input[
                        idx]  # input at the current step [1, batch, embed_size]
                    # output: [1, batch, dim_out]
                    # state: [num_layer, batch, dim_out]
                    output, state = self.decoder(input, state)
                    outputs.append(output)

                outputs = torch.cat(outputs,
                                    0).transpose(0,
                                                 1)  # [batch, seq-1, dim_out]
                output_vocab = self.projector(
                    outputs)  # [batch, seq-1, num_vocab]

                return output_vocab, _mu, _logvar, mu, logvar

            else:
                id_posts = input['posts']  # [batch, seq]
                len_posts = input['len_posts']  # [batch]
                id_responses = input['responses']  # [batch, seq]
                len_responses = input['len_responses']  # [batch]
                sampled_latents = input[
                    'sampled_latents']  # [batch, latent_size]
                len_decoder = id_responses.size()[1] - 1
                batch_size = id_posts.size()[0]
                device = id_posts.device.type

                embed_posts = self.embedding(
                    id_posts)  # [batch, seq, embed_size]
                embed_responses = self.embedding(
                    id_responses)  # [batch, seq, embed_size]

                # state = [layers, batch, dim]
                _, state_posts = self.post_encoder(embed_posts.transpose(0, 1),
                                                   len_posts)
                _, state_responses = self.response_encoder(
                    embed_responses.transpose(0, 1), len_responses)
                if isinstance(state_posts, tuple):
                    state_posts = state_posts[0]
                if isinstance(state_responses, tuple):
                    state_responses = state_responses[0]
                x = state_posts[-1, :, :]  # [batch, dim]
                y = state_responses[-1, :, :]  # [batch, dim]

                _mu, _logvar = self.prior_net(x)  # [batch, latent]
                mu, logvar = self.recognize_net(x, y)  # [batch, latent]
                z = mu + (0.5 *
                          logvar).exp() * sampled_latents  # [batch, latent]

                first_state = self.prepare_state(torch.cat(
                    [z, x], 1))  # [num_layer, batch, dim_out]
                first_input_id = (torch.ones(
                    (1, batch_size)) * self.config.start_id).long()
                if device == 'cuda':
                    first_input_id = first_input_id.cuda()
                outputs = []

                for idx in range(len_decoder):
                    if idx == 0:
                        state = first_state
                        input = self.embedding(first_input_id)
                    else:
                        input = self.embedding(
                            next_input_id)  # input at the current step [1, batch, embed_size]
                    output, state = self.decoder(input, state)
                    outputs.append(output)

                    vocab_prob = self.projector(
                        output)  # [1, batch, num_vocab]
                    next_input_id = torch.argmax(
                        vocab_prob, 2)  # take the most probable word as the next input [1, batch]

                outputs = torch.cat(outputs,
                                    0).transpose(0,
                                                 1)  # [batch, seq-1, dim_out]
                output_vocab = self.projector(
                    outputs)  # [batch, seq-1, num_vocab]

                return output_vocab, _mu, _logvar, mu, logvar

        else:  # inference

            id_posts = input['posts']  # [batch, seq]
            len_posts = input['len_posts']  # [batch]
            sampled_latents = input['sampled_latents']  # [batch, latent_size]
            batch_size = id_posts.size()[0]
            device = id_posts.device.type

            embed_posts = self.embedding(id_posts)  # [batch, seq, embed_size]

            # state = [layers, batch, dim]
            _, state_posts = self.post_encoder(embed_posts.transpose(0, 1),
                                               len_posts)
            if isinstance(state_posts, tuple):  # for an LSTM, take the hidden state h
                state_posts = state_posts[0]  # [layers, batch, dim]
            x = state_posts[-1, :, :]  # take the last layer [batch, dim]

            _mu, _logvar = self.prior_net(x)  # [batch, latent]
            z = _mu + (0.5 *
                       _logvar).exp() * sampled_latents  # [batch, latent]

            first_state = self.prepare_state(torch.cat(
                [z, x], 1))  # [num_layer, batch, dim_out]
            outputs = []

            done = torch.BoolTensor([0] * batch_size)
            first_input_id = (torch.ones(
                (1, batch_size)) * self.config.start_id).long()
            if device == 'cuda':
                done = done.cuda()
                first_input_id = first_input_id.cuda()

            for idx in range(max_len):
                if idx == 0:  # first time step
                    state = first_state  # initial decoder state
                    input = self.embedding(
                        first_input_id)  # initial decoder input [1, batch, embed_size]

                # output: [1, batch, dim_out]
                # state: [num_layers, batch, dim_out]
                output, state = self.decoder(input, state)
                outputs.append(output)

                vocab_prob = self.projector(output)  # [1, batch, num_vocab]
                next_input_id = torch.argmax(
                    vocab_prob, 2)  # take the most probable word as the next input [1, batch]
                _done = next_input_id.squeeze(
                    0) == self.config.end_id  # sequences finished at this step [batch]
                done = done | _done  # all finished sequences
                if done.sum() == batch_size:  # stop early once everything is decoded
                    break
                else:
                    input = self.embedding(
                        next_input_id)  # [1, batch, embed_size]

            outputs = torch.cat(outputs,
                                0).transpose(0, 1)  # [batch, seq, dim_out]
            output_vocab = self.projector(outputs)  # [batch, seq, num_vocab]

            return output_vocab, _mu, _logvar, None, None

    # count parameters
    def print_parameters(self):
        def statistic_param(params):
            total_num = 0  # total parameter count
            for param in params:
                num = 1
                if param.requires_grad:
                    size = param.size()
                    for dim in size:
                        num *= dim
                total_num += num
            return total_num

        print("嵌入层参数个数: %d" % statistic_param(self.embedding.parameters()))
        print("post编码器参数个数: %d" %
              statistic_param(self.post_encoder.parameters()))
        print("response编码器参数个数: %d" %
              statistic_param(self.response_encoder.parameters()))
        print("先验网络参数个数: %d" % statistic_param(self.prior_net.parameters()))
        print("识别网络参数个数: %d" %
              statistic_param(self.recognize_net.parameters()))
        print("解码器初始状态参数个数: %d" %
              statistic_param(self.prepare_state.parameters()))
        print("解码器参数个数: %d" % statistic_param(self.decoder.parameters()))
        print("输出层参数个数: %d" % statistic_param(self.projector.parameters()))
        print("参数总数: %d" % statistic_param(self.parameters()))

    # save the model
    def save_model(self, epoch, global_step, path):

        torch.save(
            {
                'affect_embedding': self.affect_embedding.state_dict(),
                'embedding': self.embedding.state_dict(),
                'post_encoder': self.post_encoder.state_dict(),
                'response_encoder': self.response_encoder.state_dict(),
                'prior_net': self.prior_net.state_dict(),
                'recognize_net': self.recognize_net.state_dict(),
                'prepare_state': self.prepare_state.state_dict(),
                'decoder': self.decoder.state_dict(),
                'projector': self.projector.state_dict(),
                'epoch': epoch,
                'global_step': global_step
            }, path)

    # load the model
    def load_model(self, path):

        checkpoint = torch.load(path)
        self.affect_embedding.load_state_dict(checkpoint['affect_embedding'])
        self.embedding.load_state_dict(checkpoint['embedding'])
        self.post_encoder.load_state_dict(checkpoint['post_encoder'])
        self.response_encoder.load_state_dict(checkpoint['response_encoder'])
        self.prior_net.load_state_dict(checkpoint['prior_net'])
        self.recognize_net.load_state_dict(checkpoint['recognize_net'])
        self.prepare_state.load_state_dict(checkpoint['prepare_state'])
        self.decoder.load_state_dict(checkpoint['decoder'])
        self.projector.load_state_dict(checkpoint['projector'])
        epoch = checkpoint['epoch']
        global_step = checkpoint['global_step']

        return epoch, global_step
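For reference, the sampling step shared by all the forward branches above is the standard reparameterization trick, z = mu + exp(0.5 * logvar) * eps, with eps supplied from outside as 'sampled_latents'. A minimal self-contained sketch (shapes are illustrative):

import torch

batch_size, latent_size = 4, 16
mu = torch.zeros(batch_size, latent_size)
logvar = torch.zeros(batch_size, latent_size)

# eps ~ N(0, I); passing it in keeps the module itself deterministic
sampled_latents = torch.randn(batch_size, latent_size)

# differentiable sample from N(mu, exp(logvar))
z = mu + (0.5 * logvar).exp() * sampled_latents
print(z.shape)  # torch.Size([4, 16])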
#!/usr/bin/env python
from Encoder import Encoder
import time

encoder = Encoder(38, 40)
t = time.time()
while True:
    cycles, dir_flag = encoder.get_cycles()
    if dir_flag:
        t = time.time()
        print "Wheel changed directions!"
        continue
    if cycles != 0:
        dt = time.time() - t
        t = time.time()
        print "Completed cycle in %f seconds" % dt
# To test your trainer and the arePantsonFire class, just create random tensors and check that everything works.
from torch.utils.data import DataLoader

# Your code goes here.
from trainer import trainer
from utils import *
from datasets import dataset
from Encoder import Encoder
from LiarLiar import arePantsonFire
from Attention import MultiHeadAttention, PositionFeedforward
liar_dataset_train = dataset(prep_Data_from='train')
liar_dataset_val = dataset(prep_Data_from='val')
sent_len, just_len = liar_dataset_train.get_max_lenghts()
dataloader_train = DataLoader(dataset=liar_dataset_train, batch_size=50)
dataloader_val = DataLoader(dataset=liar_dataset_val, batch_size=25)
statement_encoder = Encoder(hidden_dim=512, conv_layers=5)
justification_encoder = Encoder(hidden_dim=512, conv_layers=5)
multiheadAttention = MultiHeadAttention(hid_dim=512, n_heads=32)
positionFeedForward = PositionFeedforward(hid_dim=512, feedForward_dim=2048)
model = arePantsonFire(statement_encoder, justification_encoder,
                       multiheadAttention, positionFeedForward, 512, sent_len,
                       just_len, liar_dataset_train.embedding_dim, 'cpu')
trainer(model,
        dataloader_train,
        dataloader_val,
        num_epochs=1,
        train_batch=1,
        test_batch=1,
        device='cpu')

# Do not change module_list, otherwise no marks will be awarded
Example #30
0
def main():
    visualize = True

    # Read configuration file
    parser = argparse.ArgumentParser()
    parser.add_argument("-mp", help="path to the model checkpoint")
    parser.add_argument("-ep", help="path to the encoder weights")
    parser.add_argument("-pi", help="path to the pickle input file")
    parser.add_argument("-op",
                        help="path to the CAD model for the object",
                        default=None)
    parser.add_argument("-o", help="output path", default="./output.csv")
    args = parser.parse_args()

    # Load dataset
    data = pickle.load(open(args.pi, "rb"), encoding="latin1")

    # Prepare our model if needed
    if ("Rs_predicted" not in data):

        # Set the cuda device
        device = torch.device("cuda:0")
        torch.cuda.set_device(device)

        # Initialize a model
        model = Model().to(device)

        # Load model checkpoint
        model, optimizer, epoch, learning_rate = loadCheckpoint(
            args.mp, device)
        model.to(device)
        model.eval()

        # Load and prepare encoder
        encoder = Encoder(args.ep).to(device)
        encoder.eval()

        # Setup the pipeline
        pipeline = Pipeline(encoder, model, device)

    # Prepare renderer if defined
    obj_path = args.op
    if (obj_path is not None):
        obj_model = inout.load_ply(obj_path.replace(".obj", ".ply"))
        img_size = 128
        K = np.array([
            1075.65091572, 0.0, 128.0 / 2.0, 0.0, 1073.90347929, 128.0 / 2.0,
            0.0, 0.0, 1.0
        ]).reshape(3, 3)
        renderer = Renderer(obj_model, (img_size, img_size),
                            K,
                            surf_color=(1, 1, 1),
                            mode='rgb',
                            random_light=False)
    else:
        renderer = None

    # Store results in a dict
    results = {
        "scene_id": [],
        "im_id": [],
        "obj_id": [],
        "score": [],
        "R": [],
        "t": [],
        "time": []
    }

    # Loop through dataset
    for i, img in enumerate(data["images"]):
        print("Current image: {0}/{1}".format(i + 1, len(data["images"])))

        if ("Rs_predicted" in data):
            R_predicted = data["Rs_predicted"][i]
        else:

            # Run through model
            predicted_poses = pipeline.process([img])

            # Find best pose
            num_views = int(predicted_poses.shape[1] / (6 + 1))
            pose_start = num_views
            pose_end = pose_start + 6
            best_pose = 0.0
            R_predicted = None

            for k in range(num_views):
                # Extract current pose and move to next one
                curr_pose = predicted_poses[:, pose_start:pose_end]
                print(curr_pose)
                Rs_predicted = compute_rotation_matrix_from_ortho6d(curr_pose)
                Rs_predicted = Rs_predicted.detach().cpu().numpy()[0]
                pose_start = pose_end
                pose_end = pose_start + 6

                conf = predicted_poses[:, k].detach().cpu().numpy()[0]
                if (conf > best_pose):
                    R_predicted = Rs_predicted
                    best_pose = conf

            # Invert xy axes
            xy_flip = np.eye(3, dtype=np.float64)
            xy_flip[0, 0] = -1.0
            xy_flip[1, 1] = -1.0
            R_predicted = R_predicted.dot(xy_flip)

            # Invert the rotation matrix
            R_predicted = np.transpose(R_predicted)

        results["scene_id"].append(data["scene_ids"][i])
        results["im_id"].append(data["img_ids"][i])
        results["obj_id"].append(data["obj_ids"][i])
        results["score"].append(-1)
        results["R"].append(arr2str(R_predicted))
        results["t"].append(arr2str(data["ts"][i]))
        results["time"].append(-1)

        if (renderer is None):
            visualize = False

        if (visualize):
            t_gt = np.array(data["ts"][i])
            t = np.array([0, 0, t_gt[2]])

            # Render predicted pose
            R_predicted = correct_trans_offset(R_predicted, t_gt)
            ren_predicted = renderer.render(R_predicted, t)

            # Render groundtruth pose
            R_gt = data["Rs"][i]
            R_gt = correct_trans_offset(R_gt, t_gt)
            ren_gt = renderer.render(R_gt, t)

            cv2.imshow("gt render", np.flip(ren_gt, axis=2))
            cv2.imshow("predict render", np.flip(ren_predicted, axis=2))

            cv2.imshow("input image", np.flip(img, axis=2))
            if ("codebook_images" in data):
                cv2.imshow("codebook image",
                           np.flip(data["codebook_images"][i], axis=2))

            print(ren_gt.shape)
            print(ren_predicted.shape)
            print(img.shape)
            numpy_horizontal_concat = np.concatenate(
                (np.flip(ren_gt, axis=2), np.flip(
                    ren_predicted, axis=2), np.flip(img, axis=2)),
                axis=1)
            cv2.imshow("gt - prediction - input", numpy_horizontal_concat)
            key = cv2.waitKey(0)
            if (key == ord("q")):
                exit()

    # Save to CSV
    output_path = args.o
    print("Saving to: ", output_path)
    with open(output_path, "w") as f:
        col_names = list(results.keys())
        w = csv.DictWriter(f, results.keys())
        w.writeheader()
        num_lines = len(results[col_names[0]])

        for i in np.arange(num_lines):
            row_dict = {}
            for c in col_names:
                row_dict[c] = results[c][i]
            w.writerow(row_dict)
def main():
    global optimizer, lr_reducer, views, epoch, pipeline
    # Read configuration file
    parser = argparse.ArgumentParser()
    parser.add_argument("experiment_name")
    arguments = parser.parse_args()

    cfg_file_path = os.path.join("./experiments", arguments.experiment_name)
    args = configparser.ConfigParser()
    args.read(cfg_file_path)

    seed=args.getint('Training', 'RANDOM_SEED')
    if(seed is not None):
        torch.manual_seed(seed)
        #torch.use_deterministic_algorithms(True) # Requires pytorch>=1.8.0
        #torch.backends.cudnn.deterministic = True
        np.random.seed(seed=seed)
        ia.seed(seed)
        random.seed(seed)

    model_seed=args.getint('Training', 'MODEL_RANDOM_SEED', fallback=None)
    if(model_seed is not None):
        torch.manual_seed(model_seed)

    # Prepare rotation matrices for multi view loss function
    eulerViews = json.loads(args.get('Rendering', 'VIEWS'))
    views = prepareViews(eulerViews)

    # Set the cuda device
    device = torch.device("cuda:0")
    torch.cuda.set_device(device)

    # Handle loading of multiple object paths
    try:
        model_path_loss = json.loads(args.get('Dataset', 'MODEL_PATH_LOSS'))
    except ValueError:
        model_path_loss = [args.get('Dataset', 'MODEL_PATH_LOSS')]

    # Set up batch renderer
    br = BatchRender(model_path_loss,
                     device,
                     batch_size=args.getint('Training', 'BATCH_SIZE'),
                     faces_per_pixel=args.getint('Rendering', 'FACES_PER_PIXEL'),
                     render_method=args.get('Rendering', 'SHADER'),
                     image_size=args.getint('Rendering', 'IMAGE_SIZE'),
                     norm_verts=args.getboolean('Rendering', 'NORMALIZE_VERTICES'))

    # Set size of model output depending on pose representation - deprecated?
    pose_rep = args.get('Training', 'POSE_REPRESENTATION')
    if(pose_rep == '6d-pose'):
        pose_dim = 6
    elif(pose_rep == 'quat'):
        pose_dim = 4
    elif(pose_rep == 'axis-angle'):
        pose_dim = 4
    elif(pose_rep == 'euler'):
        pose_dim = 3
    else:
        print("Unknown pose representation specified: ", pose_rep)
        pose_dim = -1

    # Initialize a model using the renderer, mesh and reference image
    model = Model(num_views=len(views),
                  weight_init_name=args.get('Training', 'WEIGHT_INIT_NAME', fallback=""))
    model.to(device)

    # Create an optimizer. Here we are using Adam and we pass in the parameters of the model
    low_lr = args.getfloat('Training', 'LEARNING_RATE_LOW')
    high_lr = args.getfloat('Training', 'LEARNING_RATE_HIGH')
    optimizer = torch.optim.Adam(model.parameters(), lr=low_lr)
    lr_reducer = OneCycleLR(optimizer, num_steps=args.getfloat('Training', 'NUM_ITER'), lr_range=(low_lr, high_lr))

    # Prepare output directories
    output_path = args.get('Training', 'OUTPUT_PATH')
    prepareDir(output_path)
    shutil.copy(cfg_file_path, os.path.join(output_path, cfg_file_path.split("/")[-1]))

    # Setup early stopping if enabled
    early_stopping = args.getboolean('Training', 'EARLY_STOPPING', fallback=False)
    if early_stopping:
        window = args.getint('Training', 'STOPPING_WINDOW', fallback=10)
        time_limit = args.getint('Training', 'STOPPING_TIME_LIMIT', fallback=10)
        window_means = []
        lowest_mean = np.inf
        lowest_x = 0
        timer = 0

    # Load checkpoint for last epoch if it exists
    model_path = latestCheckpoint(os.path.join(output_path, "models/"))
    if(model_path is not None):
        model, optimizer, epoch, lr_reducer = loadCheckpoint(model_path)

    if early_stopping:
        validation_csv=os.path.join(output_path, "validation-loss.csv")
        if os.path.exists(validation_csv):
            with open(validation_csv) as f:
                val_reader = csv.reader(f, delimiter='\n')
                val_loss = list(val_reader)
            val_losses = np.array(val_loss, dtype=np.float32).flatten()
            for epoch in range(window,len(val_loss)):
                timer += 1
                w_mean = np.mean(val_losses[epoch-window:epoch])
                window_means.append(w_mean)
                if w_mean < lowest_mean:
                    lowest_mean = w_mean
                    lowest_x = epoch
                    timer = 0


    # Prepare pipeline
    encoder = Encoder(args.get('Dataset', 'ENCODER_WEIGHTS')).to(device)
    encoder.eval()
    pipeline = Pipeline(encoder, model, device)

    # Handle loading of multiple object paths and translations
    try:
        model_path_data = json.loads(args.get('Dataset', 'MODEL_PATH_DATA'))
        translations = np.array(json.loads(args.get('Rendering', 'T')))
    except ValueError:
        model_path_data = [args.get('Dataset', 'MODEL_PATH_DATA')]
        translations = [np.array(json.loads(args.get('Rendering', 'T')))]

    # Prepare datasets
    bg_path = "../../autoencoder_ws/data/VOC2012/JPEGImages/"
    training_data = DatasetGenerator(args.get('Dataset', 'BACKGROUND_IMAGES'),
                                     model_path_data,
                                     translations,
                                     args.getint('Training', 'BATCH_SIZE'),
                                     "not_used",
                                     device,
                                     sampling_method = args.get('Training', 'VIEW_SAMPLING'),
                                     max_rel_offset = args.getfloat('Training', 'MAX_REL_OFFSET', fallback=0.2),
                                     augment_imgs = args.getboolean('Training', 'AUGMENT_IMGS', fallback=True),
                                     seed=args.getint('Training', 'RANDOM_SEED'))
    training_data.max_samples = args.getint('Training', 'NUM_SAMPLES')

    # Load the validationset
    validation_data = loadDataset(json.loads(args.get('Dataset', 'VALID_DATA_PATH')),
                                  args.getint('Training', 'BATCH_SIZE'))
    print("Loaded validation set!")

    # Start training
    while(epoch < args.getint('Training', 'NUM_ITER')):
        # Train on synthetic data
        model = model.train() # Set model to train mode
        loss = runEpoch(br, training_data, model, device, output_path,
                          t=translations, config=args)
        append2file([loss], os.path.join(output_path, "train-loss.csv"))
        append2file([lr_reducer.get_lr()], os.path.join(output_path, "learning-rate.csv"))

        # Test on validation data
        model = model.eval() # Set model to eval mode
        val_loss = runEpoch(br, validation_data, model, device, output_path,
                          t=translations, config=args)
        append2file([val_loss], os.path.join(output_path, "validation-loss.csv"))

        # Plot losses
        val_losses = plotLoss(os.path.join(output_path, "train-loss.csv"),
                 os.path.join(output_path, "train-loss.png"),
                 validation_csv=os.path.join(output_path, "validation-loss.csv"))
        print("-"*20)
        print("Epoch: {0} - train loss: {1} - validation loss: {2}".format(epoch,loss,val_loss))
        print("-"*20)
        if early_stopping and epoch >= window:
            timer += 1
            if timer > time_limit:
                # report why training stopped
                print()
                print("-"*60)
                print("Validation loss seems to have plateaued, stopping early.")
                print("Best mean loss value over an epoch window of size {} was found at epoch {} ({:.8f} mean loss)".format(window, lowest_x, lowest_mean))
                print("-"*60)
                break
            w_mean = np.mean(val_losses[epoch-window:epoch])
            window_means.append(w_mean)
            if w_mean < lowest_mean:
                lowest_mean = w_mean
                lowest_x = epoch
                timer = 0
        epoch = epoch+1
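The early-stopping bookkeeping in this script (a sliding-window mean plus a timer that resets on every new minimum) can be factored into a standalone helper. A sketch of that logic, assuming the validation losses arrive as a plain list; this is not the original code:

import numpy as np

def should_stop(val_losses, window=10, time_limit=10):
    """Return True once the windowed mean of the validation loss has not
    improved for more than time_limit consecutive epochs."""
    lowest_mean = np.inf
    timer = 0
    for epoch in range(window, len(val_losses) + 1):
        w_mean = np.mean(val_losses[epoch - window:epoch])
        if w_mean < lowest_mean:
            lowest_mean = w_mean
            timer = 0
        else:
            timer += 1
        if timer > time_limit:
            return True
    return False

print(should_stop([1.0] * 30, window=5, time_limit=8))  # True: loss plateaued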
    for t in range(max_len_target):
        predictions, dec_hidden, attention_weights = decoder(
            dec_input, dec_hidden, enc_out)
        predicted_id = tf.argmax(predictions[0]).numpy()
        target_word = target_lang_tokenizer.index_word[predicted_id]
        result += target_word + ' '

        if target_word == '<end>':
            return result, sentence

        dec_input = tf.expand_dims([predicted_id], 0)

    return result, sentence


if __name__ == '__main__':
    with open('datasetflow.pkl', 'rb') as dataset_file:
        dataset_flow = pickle.load(dataset_file)

    BATCH_SIZE = 2
    vocab_inp_size = len(dataset_flow.input_tokenizer.word_index) + 1
    vocab_target_size = len(dataset_flow.target_tokenizer.word_index) + 1
    embedding_dim = 128
    units = 512
    print(vocab_inp_size, vocab_target_size)
    encoder = Encoder(vocab_inp_size, embedding_dim, units, BATCH_SIZE)
    decoder = Decoder(vocab_target_size, embedding_dim, units, BATCH_SIZE)
    encoder.initialize_hidden_state()
    encoder.load_weights('model_checkpoint/encoder.h5')
    decoder.load_weights('model_checkpoint/decoder.h5')
Example #33
0
 def addEncoder(self, conv):
     self.Encoder = Encoder(conv)
Example #34
0
]

if __name__ == '__main__':
    from Encoder import Encoder
    from Decoder import Decoder
    from SymbolMapper import SymbolMapper
    import random
    import math

    mapper = SymbolMapper(c, precision)
    map_func = lambda value: mapper.map(value)

    #Instantiate an encoder
    print 'Message string: "%s"' % message
    print 'Message hex:', message.encode("hex")
    encoder = Encoder(k, map_func, message)

    # spine length
    n = 8 * len(message)
    spine_length = (n + (k - 1)) / k

    # encode a short message, making 3 passes
    print "Producing 3 passes."
    symbols = [encoder.get_symbol(i) for i in range(spine_length) * 3]
    print "symbols: ", symbols

    # make sure we got the expected result
    assert (expected_encoder_output == symbols)

    # get average signal power
    signal_power = mapper.get_signal_average_power()
Example #35
0
class Model(nn.Module):
    def __init__(self, config):
        super(Model, self).__init__()
        self.config = config

        # word embedding layer
        self.embedding = Embedding(
            config.num_vocab,  # vocabulary size
            config.embedding_size,  # embedding dimension
            config.pad_id,  # pad_id
            config.dropout)

        # affect embedding layer
        self.affect_embedding = Embedding(config.num_vocab,
                                          config.affect_embedding_size,
                                          config.pad_id, config.dropout)
        self.affect_embedding.embedding.weight.requires_grad = False

        # post encoder
        self.post_encoder = Encoder(
            config.post_encoder_cell_type,  # RNN type
            config.embedding_size + config.affect_embedding_size,  # input size
            config.post_encoder_output_size,  # output size
            config.post_encoder_num_layers,  # number of RNN layers
            config.post_encoder_bidirectional,  # bidirectional or not
            config.dropout)  # dropout probability

        # response encoder
        self.response_encoder = Encoder(
            config.response_encoder_cell_type,
            config.embedding_size + config.affect_embedding_size,  # input size
            config.response_encoder_output_size,  # output size
            config.response_encoder_num_layers,  # number of RNN layers
            config.response_encoder_bidirectional,  # bidirectional or not
            config.dropout)  # dropout probability

        # prior network
        self.prior_net = PriorNet(
            config.post_encoder_output_size,  # post input size
            config.latent_size,  # latent variable size
            config.dims_prior)  # hidden layer sizes

        # recognition network
        self.recognize_net = RecognizeNet(
            config.post_encoder_output_size,  # post input size
            config.response_encoder_output_size,  # response input size
            config.latent_size,  # latent variable size
            config.dims_recognize)  # hidden layer sizes

        # prepare the initial decoder state
        self.prepare_state = PrepareState(
            config.post_encoder_output_size + config.latent_size,
            config.decoder_cell_type, config.decoder_output_size,
            config.decoder_num_layers)

        # decoder
        self.decoder = Decoder(
            config.decoder_cell_type,  # RNN type
            config.embedding_size + config.affect_embedding_size +
            config.post_encoder_output_size,  # input size
            config.decoder_output_size,  # output size
            config.decoder_num_layers,  # number of RNN layers
            config.dropout)  # dropout probability

        # bag-of-words prediction
        self.bow_predictor = nn.Sequential(
            nn.Linear(config.post_encoder_output_size + config.latent_size,
                      config.num_vocab), nn.Softmax(-1))

        # output layer
        self.projector = nn.Sequential(
            nn.Linear(config.decoder_output_size, config.num_vocab),
            nn.Softmax(-1))

    def forward(self,
                inputs,
                inference=False,
                use_true=False,
                max_len=60,
                gpu=True):
        if not inference:  # training
            if use_true:  # use ground-truth tokens while decoding
                id_posts = inputs['posts']  # [batch, seq]
                len_posts = inputs['len_posts']  # [batch]
                id_responses = inputs['responses']  # [batch, seq]
                len_responses = inputs['len_responses']  # [batch]
                sampled_latents = inputs[
                    'sampled_latents']  # [batch, latent_size]
                len_decoder = id_responses.size(1) - 1

                embed_posts = torch.cat([
                    self.embedding(id_posts),
                    self.affect_embedding(id_posts)
                ], 2)
                embed_responses = torch.cat([
                    self.embedding(id_responses),
                    self.affect_embedding(id_responses)
                ], 2)

                # state: [layers, batch, dim]
                _, state_posts = self.post_encoder(embed_posts.transpose(0, 1),
                                                   len_posts)
                _, state_responses = self.response_encoder(
                    embed_responses.transpose(0, 1), len_responses)
                if isinstance(state_posts, tuple):
                    state_posts = state_posts[0]
                if isinstance(state_responses, tuple):
                    state_responses = state_responses[0]
                x = state_posts[-1, :, :]  # [batch, dim]
                y = state_responses[-1, :, :]  # [batch, dim]

                _mu, _logvar = self.prior_net(x)  # [batch, latent]
                mu, logvar = self.recognize_net(x, y)  # [batch, latent]
                z = mu + (0.5 *
                          logvar).exp() * sampled_latents  # [batch, latent]

                bow_predict = self.bow_predictor(torch.cat(
                    [z, x], 1))  # [batch, num_vocab]

                first_state = self.prepare_state(torch.cat(
                    [z, x], 1))  # [num_layer, batch, dim_out]
                decoder_inputs = embed_responses[:, :-1, :].transpose(
                    0, 1)  # [seq-1, batch, embed_size]
                decoder_inputs = decoder_inputs.split(
                    [1] * len_decoder, 0)  # seq-1 tensors of [1, batch, embed_size]

                outputs = []
                for idx in range(len_decoder):
                    if idx == 0:
                        state = first_state  # initial decoder state
                    decoder_input = torch.cat(
                        [decoder_inputs[idx],
                         x.unsqueeze(0)], 2)
                    # output: [1, batch, dim_out]
                    # state: [num_layer, batch, dim_out]
                    output, state = self.decoder(decoder_input, state)
                    outputs.append(output)

                outputs = torch.cat(outputs,
                                    0).transpose(0,
                                                 1)  # [batch, seq-1, dim_out]
                output_vocab = self.projector(
                    outputs)  # [batch, seq-1, num_vocab]

                return output_vocab, bow_predict, _mu, _logvar, mu, logvar
            else:
                id_posts = inputs['posts']  # [batch, seq]
                len_posts = inputs['len_posts']  # [batch]
                id_responses = inputs['responses']  # [batch, seq]
                len_responses = inputs['len_responses']  # [batch]
                sampled_latents = inputs[
                    'sampled_latents']  # [batch, latent_size]
                len_decoder = id_responses.size(1) - 1
                batch_size = id_posts.size(0)

                embed_posts = torch.cat([
                    self.embedding(id_posts),
                    self.affect_embedding(id_posts)
                ], 2)
                embed_responses = torch.cat([
                    self.embedding(id_responses),
                    self.affect_embedding(id_responses)
                ], 2)

                # state: [layers, batch, dim]
                _, state_posts = self.post_encoder(embed_posts.transpose(0, 1),
                                                   len_posts)
                _, state_responses = self.response_encoder(
                    embed_responses.transpose(0, 1), len_responses)
                if isinstance(state_posts, tuple):
                    state_posts = state_posts[0]
                if isinstance(state_responses, tuple):
                    state_responses = state_responses[0]
                x = state_posts[-1, :, :]  # [batch, dim]
                y = state_responses[-1, :, :]  # [batch, dim]

                _mu, _logvar = self.prior_net(x)  # [batch, latent]
                mu, logvar = self.recognize_net(x, y)  # [batch, latent]
                z = mu + (0.5 *
                          logvar).exp() * sampled_latents  # [batch, latent]

                bow_predict = self.bow_predictor(torch.cat(
                    [z, x], 1))  # [batch, num_vocab]

                first_state = self.prepare_state(torch.cat(
                    [z, x], 1))  # [num_layer, batch, dim_out]
                first_input_id = (torch.ones(
                    (1, batch_size)) * self.config.start_id).long()
                if gpu:
                    first_input_id = first_input_id.cuda()

                outputs = []
                for idx in range(len_decoder):
                    if idx == 0:
                        state = first_state
                        decoder_input = torch.cat([
                            self.embedding(first_input_id),
                            self.affect_embedding(first_input_id),
                            x.unsqueeze(0)
                        ], 2)
                    else:
                        decoder_input = torch.cat([
                            self.embedding(next_input_id),
                            self.affect_embedding(next_input_id),
                            x.unsqueeze(0)
                        ], 2)
                    output, state = self.decoder(decoder_input, state)
                    outputs.append(output)

                    vocab_prob = self.projector(
                        output)  # [1, batch, num_vocab]
                    next_input_id = torch.argmax(
                        vocab_prob, 2)  # take the most probable word as the next input [1, batch]

                outputs = torch.cat(outputs,
                                    0).transpose(0,
                                                 1)  # [batch, seq-1, dim_out]
                output_vocab = self.projector(
                    outputs)  # [batch, seq-1, num_vocab]
                return output_vocab, bow_predict, _mu, _logvar, mu, logvar
        else:  # inference
            id_posts = inputs['posts']  # [batch, seq]
            len_posts = inputs['len_posts']  # [batch]
            sampled_latents = inputs['sampled_latents']  # [batch, latent_size]
            batch_size = id_posts.size(0)

            embed_posts = torch.cat(
                [self.embedding(id_posts),
                 self.affect_embedding(id_posts)], 2)

            # state: [layers, batch, dim]
            _, state_posts = self.post_encoder(embed_posts.transpose(0, 1),
                                               len_posts)
            if isinstance(state_posts, tuple):  # for an LSTM, take the hidden state h
                state_posts = state_posts[0]  # [layers, batch, dim]
            x = state_posts[-1, :, :]  # take the last layer [batch, dim]

            _mu, _logvar = self.prior_net(x)  # [batch, latent]
            z = _mu + (0.5 *
                       _logvar).exp() * sampled_latents  # [batch, latent]

            first_state = self.prepare_state(torch.cat(
                [z, x], 1))  # [num_layer, batch, dim_out]
            done = torch.tensor([0] * batch_size).bool()
            first_input_id = (torch.ones(
                (1, batch_size)) * self.config.start_id).long()
            if gpu:
                done = done.cuda()
                first_input_id = first_input_id.cuda()

            outputs = []
            for idx in range(max_len):
                if idx == 0:  # first time step
                    state = first_state  # initial decoder state
                    decoder_input = torch.cat([
                        self.embedding(first_input_id),
                        self.affect_embedding(first_input_id),
                        x.unsqueeze(0)
                    ], 2)
                else:
                    decoder_input = torch.cat([
                        self.embedding(next_input_id),
                        self.affect_embedding(next_input_id),
                        x.unsqueeze(0)
                    ], 2)
                # output: [1, batch, dim_out]
                # state: [num_layers, batch, dim_out]
                output, state = self.decoder(decoder_input, state)
                outputs.append(output)

                vocab_prob = self.projector(output)  # [1, batch, num_vocab]
                next_input_id = torch.argmax(
                    vocab_prob, 2)  # take the most probable word as the next input [1, batch]
                _done = next_input_id.squeeze(
                    0) == self.config.end_id  # sequences finished at this step [batch]
                done = done | _done  # all finished sequences
                if done.sum() == batch_size:  # stop early once everything is decoded
                    break

            outputs = torch.cat(outputs,
                                0).transpose(0, 1)  # [batch, seq, dim_out]
            output_vocab = self.projector(outputs)  # [batch, seq, num_vocab]

            return output_vocab, None, _mu, _logvar, None, None

    def print_parameters(self):
        r""" Count the parameters """
        total_num = 0  # total parameter count
        for param in self.parameters():
            num = 1
            if param.requires_grad:
                size = param.size()
                for dim in size:
                    num *= dim
            total_num += num
        print(f"参数总数: {total_num}")

    def save_model(self, epoch, global_step, path):
        r""" Save the model """
        torch.save(
            {
                'affect_embedding': self.affect_embedding.state_dict(),
                'embedding': self.embedding.state_dict(),
                'post_encoder': self.post_encoder.state_dict(),
                'response_encoder': self.response_encoder.state_dict(),
                'prior_net': self.prior_net.state_dict(),
                'recognize_net': self.recognize_net.state_dict(),
                'prepare_state': self.prepare_state.state_dict(),
                'decoder': self.decoder.state_dict(),
                'projector': self.projector.state_dict(),
                'bow_predictor': self.bow_predictor.state_dict(),
                'epoch': epoch,
                'global_step': global_step
            }, path)

    def load_model(self, path):
        r""" Load the model """
        checkpoint = torch.load(path)
        self.affect_embedding.load_state_dict(checkpoint['affect_embedding'])
        self.embedding.load_state_dict(checkpoint['embedding'])
        self.post_encoder.load_state_dict(checkpoint['post_encoder'])
        self.response_encoder.load_state_dict(checkpoint['response_encoder'])
        self.prior_net.load_state_dict(checkpoint['prior_net'])
        self.recognize_net.load_state_dict(checkpoint['recognize_net'])
        self.prepare_state.load_state_dict(checkpoint['prepare_state'])
        self.decoder.load_state_dict(checkpoint['decoder'])
        self.projector.load_state_dict(checkpoint['projector'])
        self.bow_predictor.load_state_dict(checkpoint['bow_predictor'])
        epoch = checkpoint['epoch']
        global_step = checkpoint['global_step']
        return epoch, global_step
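The save_model/load_model pair above bundles every sub-module's state_dict plus training metadata into one file. A minimal self-contained sketch of the same checkpoint pattern, using a single layer in place of the full model (the file name is arbitrary):

import torch
import torch.nn as nn

layer = nn.Linear(4, 2)
torch.save({'layer': layer.state_dict(), 'epoch': 3, 'global_step': 1200},
           'checkpoint.pth')

checkpoint = torch.load('checkpoint.pth')
restored = nn.Linear(4, 2)
restored.load_state_dict(checkpoint['layer'])
print(checkpoint['epoch'], checkpoint['global_step'])  # 3 1200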
Example #36
0
class Telescope():

    # Class variables, these are static variables just in case multiple instances of Telescope are created
    # Otherwise, we may have two instances of azMotor pointing to the same hardware, which would be very bad

    pi = pigpio.pi()
    azMotor = Motor(azMotorCfg)
    altMotor = Motor(altMotorCfg)
    azEncoder = Encoder()
    altEncoder = Encoder()

    alt = 0
    az = 1
    gearRatio = 729  #?
    currentAngle = np.array([0.0, 0.0])
    calibrationAngle = np.array([0.0, 0.0])

    # These have values for debugging purposes, otherwise None
    LAT, LON = 37.45875324211982, -122.15032378715412
    gps = None

    def __init__(self, server_IP, server_port):
        #Telescope.gps = GPS(server_IP, server_port) #Hangs until done - this is deliberate

        #Telescope.LAT, Telescope.LON = Telescope.gps.getLocation()

        initialAzAngle = Telescope.getAzAngle()
        initialAltAngle = Telescope.getAltAngle()
        initialAngle = np.array([initialAltAngle, initialAzAngle])
        Telescope.currentAngle = initialAngle

        caliFile = open('calibrationAngle.csv', 'r')
        line = caliFile.readline().split(',')
        altCali = line[0]
        azCali = line[1]
        Telescope.calibrationAngle = np.array([float(altCali), float(azCali)])
        caliFile.close()

    # Absolute target angle is passed in
    def target(self, angle):
        dAngle = np.asarray(angle) - Telescope.currentAngle
        self.actuate(dAngle)

    # Relative angle is passed in
    def actuate(self, dAngle):

        constraints_passed = self.checkConstraints(dAngle)

        correction = 0

        if constraints_passed:
            try:
                alt_actuation_angle = dAngle[
                    Telescope.alt] * Telescope.gearRatio
                az_actuation_angle = dAngle[Telescope.az] * Telescope.gearRatio
                # print(f"Alt: {alt_actuation_angle}")
                # print(f"Az: {az_actuation_angle}")
                Telescope.altMotor.actuate(alt_actuation_angle)
                Telescope.azMotor.actuate(az_actuation_angle)
                newAngle = Telescope.currentAngle + dAngle
                Telescope.currentAngle = newAngle
                ''' 
                Code for error correction (needs encoders for implementation)
                time.sleep(0.2)
                Telescope.currentAngle = Telescope.getAngles()
                correction = newAngle - Telescope.currentAngle
                self.correct(correction)
                '''

            except KeyboardInterrupt:
                Telescope.altMotor.cancel()
                Telescope.azMotor.cancel()
                pass
        else:
            print(
                "Target angle outside of physical constraints... \nCommand was aborted"
            )

    def activeTrack(self, angleFunc, timeDelta=10, trackTime=None, **kwargs):
        # Loops either for a set trackTime (float) or until user override (None).
        # kwargs takes a dictionary of arguments for angleFunc.
        # If angleFunc requires information from the telescope at runtime (runtime variables),
        # the dictionary value should be the 'getter' Telescope function that returns the desired value.
        # For example, if angleFunc(telescope_az_angle) takes the az angle of the telescope, kwargs should contain
        # kwargs = {'telescope_az_angle': Telescope.getAzAngle}
        # Variables not contained in the Telescope class are accepted too.
        # (A standalone sketch of this convention follows the class below.)

        # I don't know how I'm supposed to interface with the GPS time from here, just using python time as placeholder
        startTime = time.time()

        try:
            endTime = startTime + trackTime
        except TypeError:
            endTime = None

        keepRunning = True

        try:
            # Main Loop
            while keepRunning:
                now = time.time()

                # Resolve runtime variables in kwargs: callables are re-read
                # on every iteration instead of being overwritten once
                call_kwargs = {
                    key: (value() if callable(value) else value)
                    for key, value in kwargs.items()
                }

                # Call angleFunc which returns angle difference as [alt, az], then actuate.
                dAngle = angleFunc(**call_kwargs)
                self.actuate(dAngle)

                time.sleep(max(0.0, timeDelta - (time.time() - now)))

                if not trackTime:
                    keepRunning = True
                else:
                    keepRunning = now < endTime

        except KeyboardInterrupt:
            Telescope.altMotor.cancel()
            Telescope.azMotor.cancel()
            print("Keyboard interrupt...")
            trackTime = None

        print("Active tracking terminated")
        return trackTime

    def checkConstraints(self, dAngle):
        # Limitations on single instance actuation
        d_az_min, d_az_max = -360, 360
        d_alt_min, d_alt_max = -90, 90

        # Limitations on absolute actuation
        az_min, az_max = -360, 360  # Keep within one revolution to simplify the encoder's job
        alt_min, alt_max = 0, 90  # Spherical polar coordinates constraints (0 to 90 or 90 to 0?)

        d_alt = dAngle[Telescope.alt]
        d_az = dAngle[Telescope.az]

        next_alt = d_alt + Telescope.currentAngle[Telescope.alt]
        next_az = d_az + Telescope.currentAngle[Telescope.az]

        d_az_good, d_alt_good, az_good, alt_good = False, False, False, False

        if d_az_min <= d_az <= d_az_max:
            d_az_good = True
        else:
            print(
                f"Input change in azimuthal angle is not within constraints, must be within [{d_az_min}, {d_az_max}]"
            )
        if d_alt_min <= d_alt <= d_alt_max:
            d_alt_good = True
        else:
            print(
                f"Input change in altitudinal angle is not within constraints, must be within [{d_alt_min}, {d_alt_max}]"
            )
        if az_min <= next_az <= az_max:
            az_good = True
        else:
            print(
                f"Azimuthal angle after execution will not be within constraints, must be within [{az_min}, {az_max}]"
            )
        if alt_min <= next_alt <= alt_max:
            alt_good = True
        else:
            print(
                f"Altitudinal angle after execution will not be within constraints, must be within [{alt_min}, {alt_max}]"
            )

        return d_az_good and d_alt_good and az_good and alt_good

    def correct(self, correction):
        # 30 arc seconds may be too ambitious, needs testing
        threshold_arc_seconds = 30
        threshold_ddeg = threshold_arc_seconds / 3600.0
        if np.all(np.abs(correction) <= threshold_ddeg):
            return
        else:
            print("Skipped steps. Correcting...")
            self.actuate(correction)

    @staticmethod
    def getCurrentTime():
        return datetime.now()

    @staticmethod
    def getAngles():
        azAngle = Telescope.getAzAngle()
        altAngle = Telescope.getAltAngle()
        angle = np.array([altAngle, azAngle])  # [alt, az], matching currentAngle
        return angle

    @staticmethod
    def getAzAngle():
        azAngle = Telescope.azEncoder.getAngle()
        #return azAngle
        return Telescope.currentAngle[Telescope.az]

    @staticmethod
    def getAltAngle():
        altAngle = Telescope.altEncoder.getAngle()
        #return altAngle
        return Telescope.currentAngle[Telescope.alt]

    @staticmethod
    def getLAT():
        return Telescope.LAT

    @staticmethod
    def getLON():
        return Telescope.LON

    def getGearRatio(self):
        return Telescope.gearRatio

    def shutdown(self):
        # Until proper storing of angle is sorted out, telescope will reset to 0,0 on shutdown
        self.target([0, 0])
        Telescope.pi.stop()
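A standalone sketch of the runtime-getter convention that activeTrack documents above: callable kwargs are resolved into values just before each call to the angle function. Every name below is made up for illustration, and the getter stub stands in for Telescope.getAzAngle so the sketch runs without the telescope hardware stack:

def resolve_runtime_kwargs(kwargs):
    # replace each callable value with its return value, leaving others as-is
    return {k: (v() if callable(v) else v) for k, v in kwargs.items()}

def get_az_angle():  # stand-in for Telescope.getAzAngle
    return 42.0

def constant_drift(telescope_az_angle, drift_rate):
    print("current az:", telescope_az_angle)
    return [0.0, drift_rate]  # [d_alt, d_az] in degrees

kwargs = {'telescope_az_angle': get_az_angle, 'drift_rate': 0.05}
dAngle = constant_drift(**resolve_runtime_kwargs(kwargs))
print(dAngle)  # [0.0, 0.05]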
Example #37
0
 def __init__(self):
     super(RGMP, self).__init__()
     self.Encoder = Encoder()
     self.Decoder = Decoder()
Example #38
0
    # print(text_alpha.id2string)
    print('text word size:', text_alpha.m_size)
    print('label word size:', label_alpha.m_size)
    # print(label_alpha.id2string)
    '''
        seqs to id
    '''
    #train
    text_id_list = seq2id(text_alpha, text_sent_list)
    label_id_list = seq2id(label_alpha, label_sent_list)

    #test
    # text_test_id_list = seq2id(text_alpha, text_sent_list)
    # label_test_id_list = seq2id(label_alpha, label_sent_list)

    encoder = Encoder(text_alpha.m_size, config)
    decoder = AttnDecoderRNN(label_alpha.m_size, config)

    if config.use_cuda:
        encoder = encoder.cuda()
        decoder = decoder.cuda()

    # print(encoder)
    # print(decoder)
    lr = config.lr
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=lr)
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=lr)
    criterion = nn.NLLLoss()

    n_epochs = config.Steps
    plot_every = 200
from Adafruit_MCP230xx import Adafruit_MCP230XX
import RPi.GPIO as GPIO

from Motor import Motor
from Encoder import Encoder
from Sonar import Sonar
from Supervisor import Supervisor
from test import *

if __name__ == '__main__':
    # MCP23017
    mcp = Adafruit_MCP230XX(busnum=1, address=0x20, num_gpios=16)

    # Objects declaration
    m = Motor(mcp)
    e = Encoder()
    s = Sonar(mcp)
    sup = Supervisor(m, e, s)

    # Initialisation
    m.init_motor()
    e.initEncoder()
    s.initSonar()

    # Ultrasonic sensors activation
    s.activate_us()

    # Vacuum cleaner starting
    m.vacuum_cleaner_start()
    m.change_speed_vacuum(50)
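
The script ends right after startup. A hedged sketch of how a run might be wound down, assuming Supervisor drives the main loop; sup.run() and m.vacuum_cleaner_stop() are hypothetical method names on the snippet's own classes, while GPIO.cleanup() is the standard RPi.GPIO call:

    try:
        sup.run()                  # hypothetical main supervision loop
    except KeyboardInterrupt:
        pass
    finally:
        m.vacuum_cleaner_stop()    # hypothetical counterpart to vacuum_cleaner_start()
        GPIO.cleanup()             # release the GPIO pins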
Example #40
0
def runPacket_4(experimentSpec):
    #consider the complex symbols and channels
    # some constants:
    packetLen = experimentSpec['packetLen']
    k = experimentSpec['spinal']['k']
    c = experimentSpec['map']['bitsPerSymbol']
    precision = experimentSpec['map']['precisionBits']
    B = experimentSpec['spinal']['B']
    d = experimentSpec['spinal']['d']
    SNR_dB = experimentSpec['channel']['SNR_dB']
    passNum = experimentSpec['protocol']['passNum']
    addEqu = 1
    tail = experimentSpec['spinal']['numLastCodeStep']
    # each complex symbol carries one I and one Q real symbol, so the
    # real-valued tail and pass counts are halved (Python 2 integer division)
    tail = tail/2
    passNum = passNum/2
    # Message to be encoded:
    message = ''.join(random.choice(string.uppercase + string.digits) for _ in range((packetLen+7)//8))
    #pdb.set_trace();

    channelCof = experimentSpec['channel']['type']
    # initialize random number generator. this seed is fixed constant in order
    # to get deterministic results in this example.
    channelLen = len(channelCof)
    delay = experimentSpec['delay']
    channel_power = 0
    for i in range(channelLen):
        channel_power += abs(channelCof[i])**2
    channel_dev = math.sqrt(channel_power)
    channelCof = [ch/channel_dev for ch in channelCof]

    mapper = SymbolMapper(c, precision)
    map_func = lambda value: mapper.map(value)
    
    #Instantiate an encoder
    #print 'Message:', message
    #print 'Message hex:', message.encode("hex")
    encoder = Encoder(k, map_func, message,packetLen)
    
    # spine length
    n = packetLen
    spine_length = (n + (k - 1)) / k
    symbols_I = []
    symbols_Q = []
    # encode  message, add the tail
    for i in range(spine_length):
        for p in range(passNum):
            symbols_I.append(encoder.get_symbol(i))
            symbols_Q.append(encoder.get_symbol(i))
    for t in range(tail):
        symbols_I.append(encoder.get_symbol(spine_length-1))
        symbols_Q.append(encoder.get_symbol(spine_length-1))
    #print symbols_I, symbols_Q
    # pair the I and Q streams into complex channel symbols
    symbols_C = []
    for idx in range(len(symbols_I)):
        symbols_C.append(complex(symbols_I[idx], symbols_Q[idx]))
    #print symbols_C
    # derive the complex AWGN level from the mapper's average signal power;
    # I and Q each carry a full-power real symbol, and the per-axis standard
    # deviation splits the complex noise power evenly between the two axes
    signal_power = mapper.get_signal_average_power()
    SNR_ratio = math.pow(10.0, SNR_dB/10.0)
    noise_power = 2.0*signal_power/SNR_ratio
    noise_std_dev = math.sqrt(noise_power/2)
    

    
    # pass through the multipath channel, then add complex white gaussian
    # noise at the configured SNR_dB
    symbols_C = signal.convolve(symbols_C, channelCof)
    noisy_symbols_all_C = [sym + complex(random.gauss(0, noise_std_dev), random.gauss(0, noise_std_dev)) for sym in symbols_C]
    # drop the first `delay` samples and the trailing convolution tail; the
    # explicit end index avoids an empty slice when delay == channelLen - 1
    trim_end = len(noisy_symbols_all_C) - (channelLen - 1 - delay)
    noisy_symbols_C = noisy_symbols_all_C[delay:trim_end]
    
    # instantiate decoder
    decoder = Decoder(k, B, d, channelLen, delay, passNum, map_func)
    #pdb.set_trace()
    # update decoder with gathered points
    for i in xrange(spine_length):
        #print i
        symbols_in = []
        if i == spine_length-1:
            for ii in range(passNum+tail):
                symbols_in.append(noisy_symbols_C[i*passNum+ii])
            #print symbols_in 
        else:
            for ii in range(passNum):
                symbols_in.append(noisy_symbols_C[i*passNum+ii])
        if addEqu:
            decoder.advance_fading(symbols_in, channelCof)
        else:
            decoder.advance(symbols_in)
    results = decoder.get_most_likely()
    error, totalBits = errorStatistics(results, message, packetLen)
    print error, totalBits
    return error, totalBits
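
For context, a hedged sketch of the experimentSpec dictionary runPacket_4 reads from, reconstructed from the keys accessed above. The concrete values are illustrative assumptions, and the call assumes the module-level imports the excerpt relies on (random, string, math, scipy's signal, and the spinal Encoder/Decoder/SymbolMapper/errorStatistics helpers):

experimentSpec = {
    'packetLen': 96,                           # message length in bits (assumed)
    'delay': 0,
    'spinal': {'k': 4, 'B': 4, 'd': 1, 'numLastCodeStep': 2},
    'map': {'bitsPerSymbol': 6, 'precisionBits': 14},
    'channel': {'SNR_dB': 10, 'type': [1.0]},  # single-tap (flat) channel taps
    'protocol': {'passNum': 8},                # halved internally for I/Q
}
error, totalBits = runPacket_4(experimentSpec)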