Example #1
import os

import tensorflow as tf

# Assumes CLS_MSG_Model, CLS_SSG_Model, TFDataset, train_step and test_step
# are importable from the surrounding project.
def train(config, params):

    if params['msg']:
        model = CLS_MSG_Model(params['batch_size'], params['num_points'],
                              params['num_classes'], params['bn'])
    else:
        model = CLS_SSG_Model(params['batch_size'], params['num_points'],
                              params['num_classes'], params['bn'])

    model.build(input_shape=(params['batch_size'], params['num_points'], 3))
    model.summary()  # summary() prints itself; wrapping it in print() adds a stray 'None'
    print('[info] model training...')

    optimizer = tf.keras.optimizers.Adam(learning_rate=params['lr'])
    loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
    acc_object = tf.keras.metrics.SparseCategoricalAccuracy()

    train_ds = TFDataset(os.path.join(config['dataset_dir'], 'train.tfrecord'),
                         params['batch_size'])
    val_ds = TFDataset(os.path.join(config['dataset_dir'], 'val.tfrecord'),
                       params['batch_size'])

    train_summary_writer = tf.summary.create_file_writer(
        os.path.join(config['log_dir'], config['log_code']))

    with train_summary_writer.as_default():

        # The loop has no exit condition; training runs until interrupted.
        while True:

            train_pts, train_labels = train_ds.get_batch()

            loss, acc = train_step(optimizer, model, loss_object, acc_object,
                                   train_pts, train_labels)

            if optimizer.iterations % config['log_freq'] == 0:
                tf.summary.scalar('train loss',
                                  loss,
                                  step=optimizer.iterations)
                tf.summary.scalar('train accuracy',
                                  acc,
                                  step=optimizer.iterations)

            if optimizer.iterations % config['test_freq'] == 0:

                test_pts, test_labels = val_ds.get_batch()

                test_loss, test_acc = test_step(optimizer, model, loss_object,
                                                acc_object, test_pts,
                                                test_labels)

                tf.summary.scalar('test loss',
                                  test_loss,
                                  step=optimizer.iterations)
                tf.summary.scalar('test accuracy',
                                  test_acc,
                                  step=optimizer.iterations)
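
The config and params dictionaries are never shown; below is a minimal invocation sketch, with every value a placeholder inferred from the keys the function reads (note the training loop above runs until interrupted):

# Hypothetical driver for the train() example; all values are placeholders.
config = {
    'dataset_dir': 'data',   # must contain train.tfrecord and val.tfrecord
    'log_dir': 'logs',
    'log_code': 'run_001',
    'log_freq': 10,          # write train metrics every 10 steps
    'test_freq': 100,        # evaluate one validation batch every 100 steps
}
params = {
    'msg': True,             # True -> CLS_MSG_Model, False -> CLS_SSG_Model
    'batch_size': 8,
    'num_points': 1024,
    'num_classes': 40,
    'bn': True,              # enable batch normalization
    'lr': 1e-3,
}
train(config, params)
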
Example #2
	def sdr_improvement(self):
		# Assumes module-level: import tensorflow as tf, plus the project's TFDataset wrapper.

		with tf.Graph().as_default() as graph:
			
			config_ = tf.ConfigProto()
			config_.gpu_options.allow_growth = True
			config_.allow_soft_placement = True

			with tf.Session(graph=graph, config=config_).as_default() as sess:

				tfds = TFDataset(**self.args)

				additional_args = {
					"mix": tfds.next_mix,
					"non_mix": tfds.next_non_mix,
					"ind": tfds.next_ind,
					"pipeline": True,
					"tot_speakers" : 251
				}

				self.args.update(additional_args)
				self.build()

				print(self.args)

				nb_batches_test = tfds.length(tfds.TEST)
				feed_dict_test = {tfds.handle: tfds.get_handle(tfds.TEST)}

				sess.run(tfds.test_initializer)

				for b in range(nb_batches_test):
					output = self.model.improvement(feed_dict_test, b)
					yield output
					print('Batch #', b + 1, '/', nb_batches_test)
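
sdr_improvement() is a generator; a minimal consumption sketch, assuming an already-constructed owner object (here named experiment, a hypothetical name):

# Hypothetical usage: collect and average the per-batch SDR improvements
# yielded by the generator above.
import numpy as np

improvements = list(experiment.sdr_improvement())
print('Mean SDR improvement over the test set:', np.mean(improvements))
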
Example #3
    def test(self):

        with tf.Graph().as_default() as graph:
            tfds = TFDataset(**self.args)

            additional_args = {
                "mix": tfds.next_mix,
                "non_mix": tfds.next_non_mix,
                "ind": tfds.next_ind,
                "pipeline": True,
                "tot_speakers": 251
            }

            self.args.update(additional_args)

            config_ = tf.ConfigProto()
            config_.gpu_options.allow_growth = True
            config_.allow_soft_placement = True

            with tf.Session(graph=graph, config=config_).as_default() as sess:

                self.build()

                nb_batches_test = tfds.length(tfds.TEST)
                feed_dict_test = {tfds.handle: tfds.get_handle(tfds.TEST)}

                step = 0
                sess.run(tfds.test_initializer)  # initialize the iterator behind the test handle before reading from it

                for b in range(nb_batches_test):
                    c = self.model.test_batch(feed_dict_test, step)
                    print('Batch #', b + 1, '/', nb_batches_test, 'sec loss =', c)
Example #4
    def inference(self):

        with tf.Graph().as_default() as graph:

            config_ = tf.ConfigProto()
            config_.gpu_options.allow_growth = True
            config_.allow_soft_placement = True

            with tf.Session(graph=graph, config=config_).as_default() as sess:

                tfds = TFDataset(**self.args)

                additional_args = {
                    "mix": tfds.next_mix,
                    "non_mix": tfds.next_non_mix,
                    "ind": tfds.next_ind,
                    "pipeline": True,
                    "tot_speakers": 251
                }

                self.args.update(additional_args)
                self.build()

                if self.args["out"]:
                    nb_batches_test = tfds.length(tfds.TEST_OTHER)
                    feed_dict_test = {
                        tfds.handle: tfds.get_handle(tfds.TEST_OTHER),
                        tfds.chunk_size: self.args['chunk_size']
                    }
                    sess.run(
                        tfds.get_initializer(tfds.TEST_OTHER),
                        feed_dict={tfds.chunk_size: self.args['chunk_size']})
                else:
                    nb_batches_test = tfds.length(tfds.TEST)
                    feed_dict_test = {
                        tfds.handle: tfds.get_handle(tfds.TEST),
                        tfds.chunk_size: self.args['chunk_size']
                    }
                    sess.run(
                        tfds.test_initializer,
                        feed_dict={tfds.chunk_size: self.args['chunk_size']})

                for b in range(nb_batches_test):
                    output = self.model.infer(feed_dict_test, b)
                    print('Batch #', b + 1, '/', nb_batches_test, end=' ')
                    yield output
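
TFDataset is a project-specific wrapper, but its handle / get_handle / *_initializer members match TF1's feedable-iterator pattern. A minimal sketch of that pattern, independent of the wrapper (all names below are illustrative):

# One string placeholder selects which concrete iterator feeds the graph,
# so train/valid/test pipelines can share a single get_next() op.
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

train_ds = tf.data.Dataset.range(100).batch(10)
test_ds = tf.data.Dataset.range(30).batch(10)

handle = tf.placeholder(tf.string, shape=[])
iterator = tf.data.Iterator.from_string_handle(
    handle,
    tf.data.get_output_types(train_ds),
    tf.data.get_output_shapes(train_ds))
next_batch = iterator.get_next()

train_iter = tf.data.make_initializable_iterator(train_ds)
test_iter = tf.data.make_initializable_iterator(test_ds)

with tf.Session() as sess:
    train_handle = sess.run(train_iter.string_handle())  # like tfds.get_handle(tfds.TRAIN)
    test_handle = sess.run(test_iter.string_handle())    # like tfds.get_handle(tfds.TEST)

    sess.run(train_iter.initializer)                     # like tfds.training_initializer
    print(sess.run(next_batch, feed_dict={handle: train_handle}))

    sess.run(test_iter.initializer)                      # like tfds.test_initializer
    print(sess.run(next_batch, feed_dict={handle: test_handle}))
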
Example #5
	def train(self):
		# Assumes module-level imports: time, numpy as np, tensorflow as tf,
		# plus the project's getETA helper used in the progress printout.

		print('Total name :')

		nb_epochs = self.args['epochs']
		time_spent = [0 for _ in range(10)]
		
		with tf.Graph().as_default() as graph:
			config_ = tf.ConfigProto()
			config_.gpu_options.allow_growth = True
			config_.allow_soft_placement = True

			with tf.Session(graph=graph, config=config_).as_default() as sess:

				tfds = TFDataset(**self.args)

				additional_args = {
					"mix": tfds.next_mix,
					"non_mix": tfds.next_non_mix,
					"ind": tfds.next_ind,
					"pipeline": True,
					"tot_speakers" : 251
				}

				self.args.update(additional_args)

				self.build()


				nb_batches_train = tfds.length(tfds.TRAIN)
				nb_batches_test = tfds.length(tfds.TEST)
				nb_batches_valid = tfds.length(tfds.VALID)

				print('BATCHES')
				print(nb_batches_train, nb_batches_test, nb_batches_valid)

				feed_dict_train = {tfds.handle: tfds.get_handle(tfds.TRAIN)}
				feed_dict_valid = {tfds.handle: tfds.get_handle(tfds.VALID)}
				feed_dict_test = {tfds.handle: tfds.get_handle(tfds.TEST)}

				best_validation_cost = 1e100
				best_path = None  # set on the first validation improvement

				t1 = time.time()

				step = 0

				for epoch in range(nb_epochs):
					sess.run(tfds.training_initializer)

					for b in range(nb_batches_train):

						c = self.model.train(feed_dict_train, step)
								
						if (step+1)%self.args['validation_step'] == 0:
							t = time.time()

							sess.run(tfds.validation_initializer)

							# Compute validation mean cost with batches to avoid memory problems
							costs = []

							for b_v in range(nb_batches_valid):
								cost = self.model.valid_batch(feed_dict_valid,step)
								costs.append(cost)

							valid_cost = np.mean(costs)
							self.model.add_valid_summary(valid_cost, step)

							# Save the model if it is better:
							if valid_cost < best_validation_cost:
								best_validation_cost = valid_cost # Save as new lowest cost
								best_path = self.model.save(step)
								print('Saved best model with validation cost:', best_validation_cost)

							t_f = time.time()
							print('Validation set tested in', t_f - t, 'seconds')
							print('Validation cost:', valid_cost)

						time_spent = time_spent[1:] + [time.time() - t1]
						avg = sum(time_spent) / len(time_spent)
						print('Epoch #', epoch + 1, '/', nb_epochs, ' Batch #', b + 1, '/', nb_batches_train,
						      'in', avg, 'sec loss=', c, ' ETA =', getETA(avg, nb_batches_train, b + 1, nb_epochs, epoch + 1))

						t1 = time.time()

						step += 1

				print('Best model with validation cost:', best_validation_cost)
				print('Path =', best_path)

				# Load the best model on validation set and test it
				self.model.restore_last_checkpoint()
				
				sess.run(tfds.test_initializer)

				costs = []  # fresh list so the validation costs above do not skew the test mean
				for b_t in range(nb_batches_test):
					cost = self.model.test_batch(feed_dict_test)
					costs.append(cost)
				print('Test cost =', np.mean(costs))
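
getETA is a project helper that is never shown; a plausible sketch, assuming it turns the rolling average batch time into a remaining-time string (the body is purely hypothetical, only the call signature is taken from the loop above):

# Hypothetical reimplementation of the getETA helper called in the loop above.
def getETA(avg_batch_sec, batches_per_epoch, batch_idx, nb_epochs, epoch_idx):
    # Batches still to run: the rest of this epoch plus all remaining epochs.
    remaining = (batches_per_epoch - batch_idx) + (nb_epochs - epoch_idx) * batches_per_epoch
    seconds = int(remaining * avg_batch_sec)
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    return '%d:%02d:%02d' % (h, m, s)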