# Beispiel #1
def improved_process(P, codes, channels):
    """Group codes into longer words, encode them, then pass every encoded
    word through each channel and decode the noisy result.

    Args:
        P: parity/generator matrix; its column count fixes the group size.
        codes: flat sequence of code vectors; each run of ``n`` consecutive
            entries is concatenated into one longer word.
        channels: channel objects exposing ``add_noise``.

    Returns:
        A list with one ``np.array`` of decoded words per channel.
    """
    # Each improved word is built from n consecutive input codes,
    # where n is derived from the width of P.
    n = P.shape[1] // 3
    improved_codes = [
        np.concatenate([codes[n * group + k] for k in range(n)])
        for group in range(len(codes) // n)
    ]

    # Encoding
    improved_encoder = Encoder(P)
    encodes = [improved_encoder.encode(word) for word in improved_codes]

    # Channeling: every channel sees every encoded word.
    outputs = [
        np.array([channel.add_noise(word) for word in encodes])
        for channel in channels
    ]

    # Decoding
    improved_decoder = Decoder(P, n + 1)
    return [
        np.array([improved_decoder.decode(word) for word in noisy])
        for noisy in outputs
    ]
# Beispiel #2
def evaluate(combination: tuple, keys: list, method: str, encoder: Encoder,
             y_true: np.array, y_pred: np.array, filenames: list) -> dict:
    """Segment the predictions with the given parameter combination and
    score them with the sed_eval toolbox.

    Args:
        combination (tuple): parameter values, one per entry of ``keys``.
        keys (list): parameter names matching ``combination`` positionally.
        method (str): segmentation method forwarded to ``encoder.encode``.
        encoder (Encoder): encoder used to segment and parse predictions.
        y_true (np.array): ground-truth annotations.
        y_pred (np.array): raw predictions to be segmented.
        filenames (list): file names used when parsing segments.

    Returns:
        dict: metric results from the selected sed_eval evaluator.
    """
    # Pair each key with its value to obtain keyword arguments.
    parameters = dict(zip(keys, combination))

    # Compute segments, then render them as a csv string for evaluation.
    segments = encoder.encode(y_pred, method=method, **parameters)
    to_evaluate = encoder.parse(segments, filenames)

    # Pick the sed_eval evaluator matching the encoder's configuration.
    if encoder.method == "segment_based_metrics":
        evaluator = sb_evaluator(y_true,
                                 to_evaluate,
                                 time_resolution=encoder.time_resolution)
    else:
        evaluator = eb_evaluator(
            y_true,
            to_evaluate,
            t_collar=encoder.t_collar,
            percentage_of_length=encoder.percentage_of_length)
    return evaluator.results()
 def encode(self, string):
     """Encode *string* and return the binary tag produced by an Encoder.

     The input is first stored on the instance via ``set_string``; a fresh
     ``Encoder`` is then built from the stored string and the probability
     range table, and its result is returned.
     """
     # Record the incoming string on the instance.
     self.set_string(string)
     # Build a dedicated Encoder for this string and its probability ranges.
     worker = Encoder(self._string, self._probrangedict)
     # Delegate the actual encoding to the worker.
     return worker.encode()
# Beispiel #4
                str(size).encode())  # invio la dimensione del file
            if self.request.recv(1024).decode() == "OK":
                self.request.sendfile(fin)

    def finish(self):
        """Hook run after the request has been fully handled; log completion."""
        completion_message = "Invio completo"
        print(completion_message)


if __name__ == '__main__':

    # Prepare the encoder and compress the data before serving it.
    encoder = Encoder()

    encoder.set_environment()
    # NOTE(review): this already ends with a slash and another slash is
    # appended below ("/compressione//tmp0") — confirm the paths resolve.
    base_path = encoder.base_path + "/compressione/"

    encoder.encode()

    print("Pronto per inviare ...")

    # Queue of the three compressed chunk files to be served to clients.
    files = Queue()

    for _ in range(3):
        files.put(base_path + "/tmp" + str(_))

    # TCP server on port 20000; ServerHandler handles each connection.
    serv = TCPServer(('', 20000), ServerHandler)

    # Two daemon threads share the same server socket.
    # NOTE(review): the main thread falls through right after starting the
    # daemon threads, so the process may exit immediately — confirm a
    # blocking call (serve_forever/join) exists elsewhere in the file.
    for n in range(2):
        t = Thread(target=serv.serve_forever)
        t.daemon = True
        t.start()
# Beispiel #5
class NeuCube():
    """
    This class integrates all stages of the NeuCube model.

    Pipeline visible here: EEG samples are converted to spike trains
    (self.encoder), driven through a spiking reservoir (self.reservoir),
    and classified with deSNN output neurons (self.classifier).
    """
    def __init__(self, input_electrodes, number_of_training_samples,
                 signal_duration, signal_timestep, simulation_timestep,
                 subject):
        """Create the encoder, reservoir and classifier sub-stages.

        Args:
            input_electrodes: sequence of EEG input electrode identifiers.
            number_of_training_samples: number of samples used for training.
            signal_duration: duration of one EEG sample (units per Encoder).
            signal_timestep: timestep of the raw signal (units per Encoder).
            simulation_timestep: timestep of the reservoir simulation.
            subject: subject identifier forwarded to the Encoder.
        """
        # Stage folders such as 'input_stage_3' are resolved relative to
        # the current working directory.
        self.path = os.getcwd()
        self.input_electrodes = input_electrodes
        self.number_of_training_samples = number_of_training_samples
        self.encoder = Encoder(self.path, len(input_electrodes),
                               number_of_training_samples, signal_duration,
                               signal_timestep, subject)
        self.reservoir = NeuCubeReservoir(self.path, simulation_timestep)
        self.classifier = Classifier()

    def encode_eeg_input(self, encoding_method, save_data, plot_data, subject):
        """Encode the EEG data in the input_stage_1 folder using the given
        encoding_method and save / plot the encoder output if requested.

        Returns:
            The spike trains produced by the encoder.
        """
        spike_trains = self.encoder.encode(encoding_method, subject)
        if save_data or plot_data:
            # Reconstruct the signal and compute the coding error only when
            # saving or plotting is requested.
            # NOTE(review): rec_sig and error are not used locally —
            # presumably decode()/calc_error() populate encoder state that
            # save_rec_sig()/plot_rec_sig() read; confirm.
            rec_sig = self.encoder.decode(encoding_method)
            error = self.encoder.calc_error()
            if save_data:
                self.encoder.save_output()
                self.encoder.save_rec_sig()
            if plot_data:
                self.encoder.plot_output(encoding_method)
                self.encoder.plot_rec_sig()
        return spike_trains

    def create_reservoir(self, new_reservoir, plot_stability, input_electrodes,
                         inhibitory_split, connection_probability,
                         small_world_conn_factor, max_syn_len, w_dist_ex_mean,
                         w_dist_inh_mean, save_structure):
        """Build a fresh reservoir structure from the wiring parameters, or
        load a previously stored structure when new_reservoir is False.
        """
        if new_reservoir:
            self.reservoir.initialize_reservoir_structure(
                input_electrodes, inhibitory_split, connection_probability,
                small_world_conn_factor, max_syn_len, w_dist_ex_mean,
                w_dist_inh_mean, save_structure)
            # Stability can only be plotted for a freshly built structure.
            if plot_stability:
                self.reservoir.reservoir_structure.calculate_stability(
                    inhibitory_split, w_dist_ex_mean, w_dist_inh_mean)
        else:
            self.reservoir.load_reservoir_structure()

    def train_reservoir_STDP(self, use_STDP, encoding_method, simulation_time,
                             number_of_neurons_per_core,
                             number_of_training_samples, spike_train_data,
                             tau_plus, tau_minus, A_plus, A_minus, w_min,
                             w_max, save_training_result, plot_spikes,
                             plot_voltage):
        """Run unsupervised STDP training on the reservoir.

        Does nothing when use_STDP is False.
        """
        if use_STDP:
            self.reservoir.train_network_STDP(
                encoding_method, simulation_time, number_of_neurons_per_core,
                number_of_training_samples, spike_train_data, tau_plus,
                tau_minus, A_plus, A_minus, w_min, w_max, save_training_result,
                plot_spikes, plot_voltage)

    def train_deSNN(self, load_spikes, save_reservoir_spikes, save_neurons,
                    encoding_method, simulation_time,
                    number_of_neurons_per_core, number_of_training_samples,
                    spike_train_data, tau_plus, tau_minus, A_plus, A_minus,
                    w_min, w_max, alpha, mod, drift_up, drift_down,
                    number_of_classes, plot_spikes, plot_voltage):
        """Train the deSNN classifier: one Output_Neuron per training sample,
        built either from stored reservoir spikes or from a fresh reservoir
        run, then return the classifier's separation measure.
        """
        print('Training the deSNN network...')
        # Read target_class_labels (space-delimited, one row per sample):
        tar_class_labels = []
        for item in csv.reader(open(
                os.path.join(self.path, 'input_stage_3',
                             'tar_class_labels.txt'), 'r'),
                               delimiter=' '):
            tar_class_labels.append(item)
        if len(tar_class_labels) < number_of_training_samples:
            print('Error: Not enough class lables for number of samples!')
        else:
            tar_class_labels = tar_class_labels[0:number_of_training_samples]
            if load_spikes:  #load spikes from storage
                for s in range(number_of_training_samples):
                    # Spike files are 1-indexed on disk.
                    sample_spikes = self.classifier.load_reservoir_spikes(
                        os.path.join(
                            self.path, 'input_stage_3',
                            'reservoir_spikes_sam_' + str(s + 1) + '.txt'))
                    neuron = Output_Neuron(
                        sample_spikes,
                        len(self.reservoir.reservoir_structure.get_positions()
                            ), alpha, mod, drift_up, drift_down,
                        tar_class_labels[s])
                    if save_neurons:
                        neuron.save_whole_neuron(self.path, s)
                    self.classifier.add_neuron(neuron)
            else:  #create spikes from reservoir
                STDP = False  #enable/disable STDP during deSNN training
                reservoir_spikes = self.reservoir.train_network_deSNN(
                    encoding_method, simulation_time,
                    number_of_neurons_per_core, number_of_training_samples,
                    spike_train_data, tau_plus, tau_minus, A_plus, A_minus,
                    w_min, w_max, STDP, plot_spikes, plot_voltage,
                    save_reservoir_spikes)
                for s in range(number_of_training_samples):
                    # NOTE(review): sample s is assumed to occupy reservoir
                    # time [s*1.5*T, (s*1.5+1)*T], i.e. a 0.5*T gap between
                    # samples — confirm against train_network_deSNN timing.
                    sample_spikes = []
                    for spike in reservoir_spikes:
                        if spike[1] >= s * 1.5 * simulation_time and spike[
                                1] <= (s * 1.5 + 1) * simulation_time:
                            sample_spikes.append(spike)
                    neuron = Output_Neuron(
                        sample_spikes,
                        len(self.reservoir.reservoir_structure.get_positions()
                            ), alpha, mod, drift_up, drift_down,
                        tar_class_labels[s])
                    if save_neurons:
                        neuron.save_whole_neuron(self.path, s)
                    self.classifier.add_neuron(neuron)
            print('Added all samples/neurons to the deSNN classifier!')
        # NOTE(review): this also runs in the too-few-labels branch above,
        # calling separation() on an empty classifier — confirm intended.
        return self.classifier.separation(
            self.path, number_of_training_samples, number_of_classes,
            len(self.reservoir.reservoir_structure.get_positions()))

    def classify(self, subject, save_reservoir_spikes, first_test_sample_index,
                 number_of_test_samples, encoding_method, simulation_time,
                 number_of_neurons_per_core, number_of_training_samples, alpha,
                 mod, drift_up, drift_down, feature, k_neighbors):
        """Classify the test samples and return the resulting accuracy.

        Each test sample is loaded, encoded, filtered through the reservoir,
        wrapped in an Output_Neuron and classified; predictions are then
        compared against the labels read from tar_class_labels.txt.
        """
        # Classify test samples
        labels = []
        for test_sample_index in range(
                first_test_sample_index,
                first_test_sample_index + number_of_test_samples):
            sample_EEG = self.encoder.load_sample(test_sample_index, subject)
            sample_SSA = self.encoder.encode_sample(sample_EEG,
                                                    encoding_method)
            sample_reservoir_spikes = self.reservoir.filter_sample(
                encoding_method, test_sample_index, sample_SSA,
                simulation_time, number_of_neurons_per_core,
                save_reservoir_spikes)
            test_neuron = Output_Neuron(
                sample_reservoir_spikes,
                len(self.reservoir.reservoir_structure.get_positions()), alpha,
                mod, drift_up, drift_down)
            fitting_type = 'normal'  #'normal' for fitting to all neurons, 'COM' for fitting to only center of mass vectors
            class_label = self.classifier.classify(test_neuron, feature,
                                                   k_neighbors, fitting_type)
            labels.append(class_label[0])
        print('Predicted labels for all samples: ' + str(labels))
        # Calculate Accuracy
        tar_class_labels = []
        for item in csv.reader(open(
                os.path.join(self.path, 'input_stage_3',
                             'tar_class_labels.txt'), 'r'),
                               delimiter=' '):
            tar_class_labels.append(item[0])
        count = 0
        # NOTE(review): accuracy indexing assumes first_test_sample_index ==
        # number_of_training_samples + 1 (1-based sample ids) so that the
        # comparison below and the printed slice refer to the same labels —
        # confirm with the caller.
        for i in range(number_of_test_samples):
            if labels[i] == tar_class_labels[number_of_training_samples + i]:
                count += 1
        print('Real labels for all samples:      ' +
              str(tar_class_labels[first_test_sample_index -
                                   1:first_test_sample_index +
                                   number_of_test_samples - 1]))
        accuracy = count / float(number_of_test_samples)
        print('Accuracy: ' + str(accuracy))
        return (accuracy)
# Beispiel #6
def main(_):

	"""
	Run main function.

	Builds a BEGAN-style graph: a discriminator made of an Encoder/Decoder
	auto-encoder, and a generator that reuses the Decoder architecture.
	Trains both on CelebA images, balancing the two losses through the
	k_t control variable, and periodically saves summaries, images and
	checkpoints.
	"""

	#___________________________________________Layer info_____________________________________________________
	n = FLAGS.hidden_n

	# Discriminator-encoder conv stack: channel widths grow from n to 3n,
	# with two stride-2 layers (indices 3 and 6) downsampling the input.
	Encoder_infos = {
						"outdim":[n,n,2*n,2*n,2*n,3*n,3*n, 3*n, 3*n],\
						"kernel":[ \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
								], \
						"stride":[ \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[2, 2], \
									[1, 1], \
									[1, 1], \
									[2, 2], \
									[1, 1], \
									[1, 1], \
								], \
					} 


	# Discriminator-decoder stack: all stride-1, final layer outputs 3
	# channels (RGB).
	Decoder_infos = {
						"outdim":[n,n,n,n,n,n,3], \
						"kernel":[ \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
								], \
						"stride":[ \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
								], \
					} 

	# Generator uses the same architecture as the discriminator's decoder.
	Generator_infos = {
						"outdim":[n,n,n,n,n,n,3], \
						"kernel":[ \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
									[3, 3], \
								], \
						"stride":[ \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
									[1, 1], \
								], \
					} 


	"""
	Prepare Image Loader
	"""
	root = "./CelebA/images"
	batch_size = FLAGS.bn
	scale_size = [FLAGS.scale_h,FLAGS.scale_w]
	data_format = "NHWC"
	loader = Image_Loader(root, batch_size, scale_size, data_format, file_type="jpg")



	"""
	Make Saving Directories
	"""
	os.makedirs("./Check_Point", exist_ok=True)
	os.makedirs("./logs", exist_ok=True) # make logs directories to save summaries
	os.makedirs("./Real_Images", exist_ok=True)
	os.makedirs("./Generated_Images", exist_ok=True)
	os.makedirs("./Decoded_Generated_Images", exist_ok=True)




	#----------------------------------------------------------------------------------------------------



	#____________________________________Model composition________________________________________

	k = tf.Variable(0.0, name = "k_t", trainable = False, dtype = tf.float32) #init value of k_t = 0


	batch = loader.queue # Get image batch tensor
	image = norm_img(batch) # Normalize Imgae
	z_G = generate_z() # Sample embedding vector batch from uniform distribution
	z_D = generate_z() # Sample embedding vector batch from uniform distribution


	# Variable scopes "Encoder"/"Decoder" form the discriminator;
	# "Generator" is a second Decoder instance with its own weights.
	E = Encoder("Encoder", Encoder_infos)
	D = Decoder("Decoder", Decoder_infos)
	G = Decoder("Generator", Generator_infos)

	#Generator
	generated_image = G.decode(z_G)
	generated_image_for_disc = G.decode(z_D, reuse = True)


	#Discriminator (Auto-Encoder)	

	#image <--AutoEncoder--> reconstructed_image_real
	embedding_vector_real = E.encode(image)
	reconstructed_image_real = D.decode(embedding_vector_real)

	#generated_image_for_disc <--AutoEncoder--> reconstructed_image_fake
	embedding_vector_fake_for_disc = E.encode(generated_image_for_disc, reuse=True)
	reconstructed_image_fake_for_disc = D.decode(embedding_vector_fake_for_disc, reuse=True)

	#generated_image <--AutoEncoder--> reconstructed_image_fake
	embedding_vector_fake = E.encode(generated_image, reuse=True)
	reconstructed_image_fake = D.decode(embedding_vector_fake, reuse=True)


	#-----------------------------------------------------------------------------------------------



	#_________________________________Loss & Summary_______________________________________________


	"""
	Define Loss
	"""
	# BEGAN losses: the discriminator minimizes reconstruction error on real
	# images minus k_t times the error on generated ones.
	real_image_loss = get_loss(image, reconstructed_image_real)
	generator_loss_for_disc = get_loss(generated_image_for_disc, reconstructed_image_fake_for_disc)
	discriminator_loss = real_image_loss - tf.multiply(k, generator_loss_for_disc)

	generator_loss = get_loss(generated_image, reconstructed_image_fake)
	global_measure = real_image_loss + tf.abs(tf.multiply(FLAGS.gamma,real_image_loss) - generator_loss)


	"""
	Summaries
	"""
	tf.summary.scalar('Real image loss', real_image_loss)
	tf.summary.scalar('Generator loss for discriminator', generator_loss_for_disc)
	tf.summary.scalar('Discriminator loss', discriminator_loss)
	tf.summary.scalar('Generator loss', generator_loss)
	tf.summary.scalar('Global_Measure', global_measure)
	tf.summary.scalar('k_t', k)
	

	merged_summary = tf.summary.merge_all() # merege summaries, no more summaries under this line

	#-----------------------------------------------------------------------------------------------







	#_____________________________________________Train_______________________________________________

	# Split trainable variables by scope name: Encoder+Decoder belong to the
	# discriminator, Generator to the generator.
	discriminator_parameters = []
	generator_parameters = []

	for v in tf.trainable_variables():
		if 'Encoder' in v.name:
			discriminator_parameters.append(v)
			print("Discriminator parameter : ", v.name)
		elif 'Decoder' in v.name:
			discriminator_parameters.append(v)
			print("Discriminator parameter : ", v.name)			
		elif 'Generator' in v.name:
			generator_parameters.append(v)
			print("Generator parameter : ", v.name)
		else:
			print("None of Generator and Discriminator parameter : ", v.name)

	optimizer_D = tf.train.AdamOptimizer(FLAGS.lr,beta1=FLAGS.B1,beta2=FLAGS.B2).minimize(discriminator_loss,var_list=discriminator_parameters)
	optimizer_G = tf.train.AdamOptimizer(FLAGS.lr,beta1=FLAGS.B1,beta2=FLAGS.B2).minimize(generator_loss,var_list=generator_parameters)

	# k_t is updated only after both optimizers have run, clipped to [0, 1].
	with tf.control_dependencies([optimizer_D, optimizer_G]):
		k_update = tf.assign(k, tf.clip_by_value(k + FLAGS.lamb * (FLAGS.gamma*real_image_loss - generator_loss), 0, 1)) #update k_t

	init = tf.global_variables_initializer()	


	NUM_THREADS=2
	config=tf.ConfigProto(inter_op_parallelism_threads=NUM_THREADS,\
						intra_op_parallelism_threads=NUM_THREADS,\
						allow_soft_placement=True,\
						device_count = {'CPU': 1},\
						)

	# config.gpu_options.per_process_gpu_memory_fraction = FLAGS.gpu_portion



	with tf.Session(config=config) as sess:

		sess.run(init) # Initialize Variables

		coord = tf.train.Coordinator() # Set Coordinator to Manage Queue Runners
		threads = tf.train.start_queue_runners(sess, coord=coord) # Set Threads
		writer = tf.summary.FileWriter('./logs', sess.graph) # add the graph to the file './logs'		

#_______________________________Restore____________________________________

		saver = tf.train.Saver(max_to_keep=1000)
		ckpt = tf.train.get_checkpoint_state(checkpoint_dir="./Check_Point")

		
		# try :	
		# 	if ckpt and ckpt.model_checkpoint_path:
		# 		print("check point path : ", ckpt.model_checkpoint_path)
		# 		saver.restore(sess, ckpt.model_checkpoint_path)	
		# 		print('Restored!')
		# except AttributeError:
		# 		print("No checkpoint")	

		# Save one batch of (denormalized) real images for visual reference.
		Real_Images = sess.run(denorm_img(image))
		save_image(Real_Images, '{}.png'.format("./Real_Images/Real_Image"))

#---------------------------------------------------------------------------
		for t in range(FLAGS.iteration): # Mini-Batch Iteration Loop

			if coord.should_stop():
				break
			
			# One combined step: both optimizers, the monitored losses, and
			# the k_t update (which depends on the optimizers) run together.
			_, _, l_D, l_G, l_Global, k_t = sess.run([\
													optimizer_D,\
													optimizer_G,\
													discriminator_loss,\
													generator_loss,\
													global_measure,\
													k_update,\
											   		])

			print(
				 " Step : {}".format(t),
				 " Global measure of convergence : {}".format(l_Global),
				 " Generator Loss : {}".format(l_G),
				 " Discriminator Loss : {}".format(l_D),
				 " k_{} : {}".format(t,k_t) 
				 )


			

			
	       #________________________________Save____________________________________


			if t % 200 == 0:

				# NOTE(review): this is a separate sess.run, so the summary
				# reflects a different batch than the train step above —
				# confirm that is intended.
				summary = sess.run(merged_summary)
				writer.add_summary(summary, t)


				Generated_Images, Decoded_Generated_Images = sess.run([denorm_img(generated_image), denorm_img(reconstructed_image_fake)])
				save_image(Generated_Images, '{}/{}{}.png'.format("./Generated_Images", "Generated", t))
				save_image(Decoded_Generated_Images, '{}/{}{}.png'.format("./Decoded_Generated_Images", "AutoEncoded", t))
				print("-------------------Image saved-------------------")


			if t % 500 == 0:
				print("Save model {}th".format(t))
				saver.save(sess, "./Check_Point/model.ckpt", global_step = t)


	       #--------------------------------------------------------------------
		
		writer.close()
		coord.request_stop()
		coord.join(threads)
# Beispiel #7
        decoder = Decoder(P, 3)
    else:
        encoder = EncoderHamming()
        decoder = DecoderHamming()
    channel = Channel(0.3)

    # Receiving code
    code = np.matrix([[int(x) for x in input()]])

    # Printing code
    print("Code received: ")
    print(code)
    input()

    # Encoding channel
    print("Encoding: ")
    encoded = encoder.encode(code)
    print(encoded)
    input()

    # Passing through channel
    print("Passing through channel: ")
    through_channel = np.array(channel.add_noise(np.array(encoded)[0]))
    print(through_channel)
    input()

    # Decoding code
    print("Decoding: ")
    decoded = decoder.decode(through_channel)
    print(decoded)