Example #1
    def find(self, data, epochs=1, start_lr=1e-7, end_lr=10., verbose=1):
        self.lrs = []
        self.losses = []
        self.avg_loss = 0
        self.best_loss = 1e9
        self.batch = 0

        its = epochs * len(data)
        self.lr_mult = (end_lr / start_lr) ** (1. / its)

        self.weights_file = tempfile.mkstemp()[1]
        self.model.save_weights(self.weights_file)

        orig_lr = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, start_lr)

        cb = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(
            x=data,
            epochs=epochs,
            verbose=verbose,
            callbacks=[cb]
        )

        self.model.load_weights(self.weights_file)
        K.set_value(self.model.optimizer.lr, orig_lr)
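
The find() method above delegates the per-batch work to a self.on_batch_end hook that is not shown. The sketch below is a minimal version of what such a hook typically does, reusing the attribute names set up in Example #1 and the same Keras backend alias K; the 0.98 smoothing factor and the 4x early-stopping threshold are assumptions, not the author's values.

    def on_batch_end(self, batch, logs):
        # Record the learning rate used for this batch.
        lr = K.get_value(self.model.optimizer.lr)
        self.lrs.append(lr)

        # Track an exponentially smoothed loss so a single noisy batch does not stop the search.
        loss = logs["loss"]
        self.batch += 1
        self.avg_loss = 0.98 * self.avg_loss + 0.02 * loss
        smooth_loss = self.avg_loss / (1 - 0.98 ** self.batch)
        self.losses.append(smooth_loss)

        # Stop early once the loss clearly diverges; otherwise remember the best loss so far.
        if self.batch > 1 and smooth_loss > 4 * self.best_loss:
            self.model.stop_training = True
            return
        if smooth_loss < self.best_loss:
            self.best_loss = smooth_loss

        # Exponentially increase the learning rate for the next batch.
        K.set_value(self.model.optimizer.lr, lr * self.lr_mult)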
Example #2
def main(pars=None):

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(pars['hidden'], activation=tf.nn.relu),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation=tf.nn.softmax)
    ])

    logs = LambdaCallback(
        on_epoch_end=lambda epoch, logs: job.log({
            'ep': epoch,
            'acc': logs['acc'],
            'loss': logs['loss']
        }))

    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_train, y_train, epochs=pars['epochs'], callbacks=[logs])
    results = model.evaluate(x_test, y_test)

    print('loss, acc test:', results)
    job.log({'acc': results[1], 'loss': results[0], 'type': 1})
Example #3
    def find_lr(self,
                X,
                y,
                num_samples,
                batch_size,
                epochs=5,
                min_lr=1e-6,
                max_lr=10):
        '''Method for finding the optimal learning-rate bounds. The minimum and
        maximum learning rate between which the rate will move must be defined.
        It accepts data that fits in memory as well as data processed online.
        Use y=None when passing a tf.data.Dataset.
        The number of observations, the mini-batch size and the number of epochs must be given.'''

        # Clear any existing state
        self._reset_state()

        if self.weights_file is not None:
            print(
                "[INFO] spremam početne vrijednosti težina i inicijalnu stopu učenja..."
            )
            self.init_lr = K.get_value(self.model.optimizer.lr)
            self.model.save_weights(self.weights_file)

        print("[INFO] tražim optimalne granice stope učenja...")
        #Izračun broja iteracija
        iterations = np.ceil(num_samples / batch_size) * epochs
        # Compute the learning-rate increase factor
        self.factor = self.clc_factor(min_lr, max_lr, iterations)
        # Set the learning rate to the defined minimum value
        K.set_value(self.model.optimizer.lr, min_lr)
        # Define a callback that records metrics at the end of each mini-batch,
        # adjusts the learning rate, and stops training if the loss grows too much
        callback = LambdaCallback(
            on_batch_end=lambda batch, logs: self._on_batch_end(batch, logs))

        # In case the input is a tf.data.Dataset
        if y is None:
            self.model.fit(x=X, y=None, epochs=epochs, callbacks=[callback])
        else:
            self.model.fit(x=X,
                           y=y,
                           batch_size=batch_size,
                           epochs=epochs,
                           callbacks=[callback])

        print("[INFO] learning-rate range search finished")
        print("[INFO] generate the plot and pick the optimal bounds before the next training cycle")

        if self.weights_file is None:
            print("[INFO] ponovno inicijalizirajte model sa istim seed-om")
        else:
            print("[INFO] restoring the model's initial weights and the initial learning rate")
            K.set_value(self.model.optimizer.lr, self.init_lr)
            self.model.load_weights(self.weights_file)
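
A hypothetical call to find_lr() with an in-memory dataset; the finder instance, the x_train/y_train arrays, and the batch size and bounds below are illustrative assumptions, not values from the source.

# Hypothetical usage of Example #3's find_lr(); finder, x_train and y_train are assumed.
finder.find_lr(X=x_train,
               y=y_train,
               num_samples=len(x_train),
               batch_size=64,
               epochs=3,
               min_lr=1e-6,
               max_lr=10)
# For a tf.data.Dataset, pass the dataset as X with y=None and still
# supply num_samples and batch_size so the iteration count can be derived.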
Example #4
 def __init__(self,
              img_wt,
              img_ht,
              flow: Flow = None,
              hidden_units=32,
              z_size=64,
              encoder_strides=[2, 2],
              decoder_strides=[2, 2],
              callbacks=[],
              metrics=[],
              output_activation='sigmoid',
              loss='binary_crossentropy',
              beta_update_fn=None):
     super(GatedConvVAE, self).__init__()
     if beta_update_fn is None:
         beta_update_fn = lambda i, beta: 1.0E-2 * i
     self.flow = flow
     self.hidden_units = hidden_units
     self.z_size = z_size
     self.num_downsamples = len(encoder_strides)
     self.num_upsamples = len(decoder_strides)
     self.encoder_strides = encoder_strides
     self.decoder_strides = decoder_strides
     self.output_activation = output_activation
     self.encoder = self._create_encoder(img_wt, img_ht)
     self.decoder, self.flow_layer = self._create_decoder(img_wt, img_ht)
     beta_update = LambdaCallback(on_epoch_begin=lambda i, _:
                                  beta_update_fn(i, self.flow_layer.beta))
     decoder_output = self.decoder(self.encoder(self.encoder.inputs))
     self.model = Model(inputs=self.encoder.inputs,
                        outputs=decoder_output[0])
     self.model.compile(loss=loss,
                        optimizer=Adamax(learning_rate=1.0E-4, clipnorm=1.),
                        callbacks=[beta_update] + callbacks,
                        metrics=metrics)
Example #5
def main():

    text = get_text_file()

    chars, char2index, index2char = create_vocabulary_dictionary(text)

    x, y, maxlen = create_syllable(text, chars, char2index)

    model = create_model(maxlen, chars)
    """ learn 1 time (1 epoch) """
    def on_epoch_end(epoch, _):
        print(f'\nEpoch: {epoch}')
        start_index = random.randint(0, len(text) - maxlen - 1)
        for diversity in [0.2, 0.5, 1.0, 1.2]:
            print('\nDiversity: ', diversity)
            generated = ''
            sentence = text[start_index:start_index + maxlen]
            generated += sentence
            print(f"Seed: {sentence}")
            sys.stdout.write(generated)
            for i in range(400):
                x_pred = np.zeros((1, maxlen, len(chars)))
                for t, char in enumerate(sentence):
                    x_pred[0, t, char2index[char]] = 1.
                preds = model.predict(x_pred, verbose=0)[0]
                next_index = sample(preds, diversity)
                next_char = index2char[next_index]
                sentence = sentence[1:] + next_char
                sys.stdout.write(next_char)
                sys.stdout.flush()

    print_callback = LambdaCallback(on_epoch_end=on_epoch_end)
    model.fit(x, y, batch_size=128, epochs=30, callbacks=[print_callback])
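
Example #5 depends on a sample() helper that is not shown. A common temperature-sampling implementation it could correspond to looks like the sketch below (an assumption, not necessarily the author's code).

import numpy as np

def sample(preds, temperature=1.0):
    # Re-weight the predicted character distribution by temperature and draw one index.
    preds = np.asarray(preds).astype("float64")
    preds = np.log(preds + 1e-8) / temperature
    exp_preds = np.exp(preds)
    preds = exp_preds / np.sum(exp_preds)
    probas = np.random.multinomial(1, preds, 1)
    return np.argmax(probas)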
Example #6
 def fit(
         self,
         batch_size=10000,
         epochs=300
 ):
     name_generator = LambdaCallback(on_epoch_end=generate_name_loop)
     self.model.fit(self.X, self.Y, batch_size=batch_size, epochs=epochs, callbacks=[name_generator], verbose=1)
Example #7
    def find(self,
             generator,
             start_lr=1e-7,
             end_lr=10.,
             beta=0.98,
             num_iter=100,
             **kwargs):
        # calculate number of epochs
        num_epochs = math.ceil(num_iter / len(generator))

        # calculate lr multiplier
        self.lr_multiplier = (end_lr / start_lr)**(1. / num_iter)

        # save initial state
        orig_lr = float(K.get_value(self.model.optimizer.lr))
        self.model.save_weights('orig.h5')

        # set current lr as start_lr
        K.set_value(self.model.optimizer.lr, start_lr)

        # train model with a callback based on self.on_batch_end()
        cb = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(
            batch, logs, beta=beta))
        self.model.fit_generator(generator=generator,
                                 epochs=num_epochs,
                                 callbacks=[cb],
                                 **kwargs)

        # restore initial state
        K.set_value(self.model.optimizer.lr, orig_lr)
        self.model.load_weights('orig.h5')
Example #8
    def find_generator(self, generator, start_lr, end_lr, epochs=1, steps_per_epoch=None, **kw_fit):
        if steps_per_epoch is None:
            try:
                steps_per_epoch = len(generator)
            except (ValueError, NotImplementedError) as e:
                raise ValueError('`steps_per_epoch=None` is only valid for a'
                                 ' generator based on the '
                                 '`keras.utils.Sequence`'
                                 ' class. Please specify `steps_per_epoch` '
                                 'or use the `keras.utils.Sequence` class.') from e
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(epochs * steps_per_epoch))

        # Save weights into a file
        self.model.save_weights('tmp.h5')

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.learning_rate)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.learning_rate, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch,
                                                      logs: self.on_batch_end(batch, logs))

        self.model.fit_generator(generator=generator,
                                 epochs=epochs,
                                 steps_per_epoch=steps_per_epoch,
                                 callbacks=[callback],
                                 **kw_fit)

        # Restore the weights to the state before model fitting
        self.model.load_weights('tmp.h5')

        # Restore the original learning rate
        K.set_value(self.model.optimizer.learning_rate, original_lr)
Example #9
def обучение_модели_трафик(мод, ген1, ген2, количество_эпох=None):
  filepath="model.h5"
  model_checkpoint_callback = ModelCheckpoint(
    filepath=filepath,
    save_weights_only=True,
    monitor='val_loss',
    mode='min',
    save_best_only=True)
    
  cur_time = time.time()
  def on_epoch_end(epoch, log):
    nonlocal cur_time
    k = list(log.keys())
    p1 = 'Epoch #' + str(epoch+1)
    p2 = p1 + ' '* (10 - len(p1)) + 'Training time: ' + str(round(time.time()-cur_time,2)) + 's'
    p3 = p2 + ' '* (33 - len(p2)) + 'Training loss: ' + str(round(log[k[0]],5))
    p4 = p3 + ' '* (77 - len(p3)) + 'Validation loss: ' + str(round(log[k[1]],5))
    print(p4)
    cur_time = time.time()
  def on_epoch_begin(epoch, log):
    nonlocal cur_time
    cur_time = time.time()
  myCB = LambdaCallback(on_epoch_end = on_epoch_end, on_epoch_begin=on_epoch_begin)

  history = мод.fit_generator(ген1, epochs=количество_эпох, verbose=0, validation_data=ген2, callbacks=[model_checkpoint_callback, myCB])
  
  plt.plot(history.history['loss'], label='Mean squared error on the training set')
  plt.plot(history.history['val_loss'], label='Mean squared error on the validation set')
  plt.ylabel('Mean error')
  plt.legend()
Example #10
    def find(self,
             x_train,
             y_train,
             start_lr,
             end_lr,
             batch_size=64,
             epochs=1):
        num_batches = epochs * x_train.shape[0] / batch_size
        self.lr_mult = (float(end_lr) / float(start_lr))**(float(1) /
                                                           float(num_batches))

        # Save weights into a file
        self.model.save_weights('tmp.h5')

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.lr)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.lr, start_lr)

        callback = LambdaCallback(
            on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(x_train,
                       y_train,
                       batch_size=batch_size,
                       epochs=epochs,
                       callbacks=[callback])

        # Restore the weights to the state before model fitting
        self.model.load_weights('tmp.h5')

        # Restore the original learning rate
        K.set_value(self.model.optimizer.lr, original_lr)
Example #11
    def onTraining(self, monitor_handler):
        """
        Callback for training
        Params:
            monitor_handler: MonitorHandler object for train/test status monitoring
            (see https://www.ibm.com/support/knowledgecenter/SSRU69_1.1.3/base/vision_custom_api.html
                section "Monitoring and reporting statistics")
        Return: None
        """
        log.info("CALL MyTrain.onTraining")

        # function that takes logs (a dictionary containing loss and accuracy values)
        # and calls the monitor_handler methods to update metrics:
        #   * training loss (in updateTrainMetrics)
        #   * testing loss and accuracy (in updateTestMetrics)
        # allowing live graph plot in PowerAI Vision during training
        def logMetrics(epoch, logs):
            current_iter = (epoch + 1) * self.dataset_size / BATCH_SIZE
            monitor_handler.updateTrainMetrics(current_iter,
                                               int(self.params["max_iter"]),
                                               logs["loss"], epoch + 1)
            monitor_handler.updateTestMetrics(current_iter, logs["val_acc"],
                                              logs["val_loss"], epoch + 1)

        # launch training using the data we loaded in `onPreprocessing`
        # at the end of each epoch, call the `logMetrics` function as a callback
        # see https://keras.io/callbacks/
        self.model.fit(self.X_train,
                       self.y_train,
                       batch_size=BATCH_SIZE,
                       epochs=int(
                           int(self.params["max_iter"]) * BATCH_SIZE /
                           self.dataset_size),
                       validation_data=(self.X_test, self.y_test),
                       callbacks=[LambdaCallback(on_epoch_end=logMetrics)])
Example #12
    def get_similarity_tests_callbacks(self,
                                       tests_dict,
                                       mode_list,
                                       metric_list,
                                       job_dir,
                                       log_dir=None):
        callback_list = []

        if log_dir:
            log_path = os.path.join(job_dir, 'logs', log_dir)
        else:
            print(
                'Logging directory for similarity tests is not specified, writing to `logs/temp` directory'
            )
            log_path = os.path.join(job_dir, 'logs', 'temp')
        os.makedirs(log_path, exist_ok=True)

        def callback_factory(mode,
                             metric,
                             verbose=False,
                             ignore_oov=True,
                             delimeter='\t'):
            def similarity_scalar(epoch, logs):
                if verbose:
                    print('Running similarity tests...', end='')
                res = self.fetch_similarity_results(tests_dict,
                                                    mode,
                                                    metric,
                                                    verbose=verbose,
                                                    ignore_oov=ignore_oov,
                                                    delimeter=delimeter)

                if not verbose:
                    file_writer = tf.summary.create_file_writer(
                        os.path.join(log_path, 'metrics', metric, mode))
                    file_writer.set_as_default()

                for test_name in res.keys():
                    _, s, _ = res[test_name]
                    if verbose:
                        print(f"{test_name}: {s:.2f}. Epoch: {epoch}")
                    else:
                        tf.summary.scalar(os.path.join(
                            f'similarity_{test_name}', metric, mode),
                                          data=s,
                                          step=epoch)
                        # TODO: add this back, need to define model.name field
                        # tf.summary.scalar(os.path.join(f'similarity_{test_name}', 'regimes', self.name), data = s, step = epoch)
                print('\r', end='')

            return similarity_scalar

        # create a log for each (mode, metric) pair
        for mode in mode_list:
            for metric in metric_list:
                callback_func = callback_factory(mode, metric)
                callback = LambdaCallback(on_epoch_end=callback_func)
                callback_list.append(callback)

        return callback_list
Example #13
    def compile(self):
        d_optimizer = Adam(
            lr=.00001, beta_1=.5
        )  # lower lr necessary to keep discriminator from getting too much better than generator
        gan_optimizer = Adam(lr=.0001, beta_1=.5)

        self.discriminator.trainable = True
        self.concated_discriminator.trainable = True
        self.discriminator.compile(optimizer=d_optimizer,
                                   loss='binary_crossentropy')
        self.discriminator.trainable = False
        self.concated_discriminator.trainable = False

        # don't need to worry about setting trainable because all layers not shared with discriminator are non-trainable
        self.concated_discriminator.compile(optimizer=d_optimizer,
                                            loss='binary_crossentropy')

        loss = ['binary_crossentropy', 'mse']
        loss_weights = [100, 1]
        self.gan.compile(optimizer=gan_optimizer,
                         loss=loss,
                         loss_weights=loss_weights)
        self.discriminator.trainable = True
        self.concated_discriminator.trainable = True

        # only the progress callback gets used right now, as I'm not sure how to make this work with a custom training loop
        progress_callback = LambdaCallback(on_epoch_end=report_epoch_progress)
        checkpoint_callback = ModelCheckpoint('./model-checkpoint.ckpt')
        tensorboard_callback = TensorBoard(log_dir='../logs/tensorboard-logs',
                                           write_images=True)
        self.callbacks = [progress_callback]
        self.gan.summary()
Example #14
    def get_save_callbacks(self, save_path, args, period=1):
        if save_path is None:
            if args.save_dir is None:  # don't save the model
                return []
            save_path = os.path.join(args.job_dir, 'saved_models',
                                     args.save_dir)  # default save_path

        callbacks = []

        def save_config_callback_factory(save_path, args):
            def save_train_config(epoch, logs):
                if (epoch + 1) % period == 0:
                    self.save_train_config(
                        save_path, epoch + 1,
                        args)  # note that epochs_trained = epoch+1

            return save_train_config

        ckpt_path = os.path.join(save_path, 'cp-{epoch:04d}.ckpt')
        # save_weights_only = False has a bug, hopefully will get resolved:
        # https://github.com/tensorflow/tensorflow/issues/39679#event-3376275799
        # at any rate, saving custom models requires some effort, so rely on load_weights
        cp_ckpt = tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_path,
                                                     save_weights_only=True,
                                                     verbose=1,
                                                     max_to_keep=5,
                                                     period=period)
        callbacks.append(cp_ckpt)
        callbacks.append(
            LambdaCallback(
                on_epoch_end=save_config_callback_factory(save_path, args)))

        return callbacks
Example #15
	def find(self, training_data, start_lr=1e-10, end_lr=1e+1, batch_size=32, epochs=5, sample_size=None, verbose=1):
		# Reset parameters
		self.reset()
		
		# If sample size is not defined, use length of training data
		if sample_size is None:
			sample_size = len(training_data[0])
			
		# Calculate update rate for learning rate
		updateTimePerEpoch = np.ceil(sample_size / float(batch_size))
		updateTimeTotal = epochs * updateTimePerEpoch
		self.lrMult = (end_lr / start_lr) ** (1.0 / updateTimeTotal)
			
		# Save model weights and learning rate, so we can reset it later
		weightsFile = tempfile.mkstemp()[1]
		self.model.save_weights(weightsFile)
		orig_lr = K.get_value(self.model.optimizer.lr)
		
		# Create callback function to update learning rate every batch
		callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))
		
		# Run training
		K.set_value(self.model.optimizer.lr, start_lr)
		self.model.fit(training_data[0], training_data[1],
					   batch_size=batch_size,
					   epochs=epochs,
					   verbose=verbose,
					   callbacks=[callback])
		
		# Load model weights back
		self.model.load_weights(weightsFile)
		K.set_value(self.model.optimizer.lr, orig_lr)
Example #16
def train_generator(generator, discriminator, gan):
    wandb_logging_callback = LambdaCallback(on_epoch_end=log_generator)
    discriminator.trainable = False
    gan.fit_generator(gan_image_generator,
                      epochs=1,
                      steps_per_epoch=config.steps_per_epoch,
                      callbacks=[wandb_logging_callback])
Example #17
def train_generator(generator, descriminator, joint_model, config, save_file):
    num_examples = config.generator_examples

    train = generator_inputs(num_examples, config)
    labels = np.ones(num_examples)
    #
    # labels = to_categorical(labels)
    # print(labels)
    generator.trainable = True
    descriminator.trainable = False

    wandb_logging_callback = LambdaCallback(on_epoch_end=log_generator)

    joint_model.fit(x=train,
                    y=labels,
                    epochs=config.generator_epochs,
                    batch_size=config.batch_size,
                    callbacks=[wandb_logging_callback],
                    workers=10,
                    use_multiprocessing=True)

    save_path = os.path.join(pathlib.Path().absolute(), 'ml_model/models/',
                             save_file)
    generator.save(save_path)
    print('saving generator', save_path, os.path.exists(save_path))
Example #18
    def find(self, x_train, y_train, start_lr, end_lr, batch_size=64, epochs=1):
        # If x_train contains data for multiple inputs, use length of the first input.
        # Assumption: the first element in the list is single input; NOT a list of inputs.
        N = x_train[0].shape[0] if isinstance(x_train, list) else x_train.shape[0]

        # Compute number of batches and LR multiplier
        num_batches = epochs * N / batch_size
        self.lr_mult = (float(end_lr) / float(start_lr)) ** (float(1) / float(num_batches))
        # Save weights into a file
        self.model.save_weights('tmp.h5')

        # Remember the original learning rate
        original_lr = K.get_value(self.model.optimizer.learning_rate)

        # Set the initial learning rate
        K.set_value(self.model.optimizer.learning_rate, start_lr)

        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        self.model.fit(x_train, y_train,
                       batch_size=batch_size, epochs=epochs,
                       callbacks=[callback])

        # Restore the weights to the state before model fitting
        self.model.load_weights('tmp.h5')

        # Restore the original learning rate
        K.set_value(self.model.optimizer.learning_rate, original_lr)
Example #19
def обучение_модели(модель, x_train, y_train, x_test=[], y_test=[], batch_size=None, epochs=None, коэф_разделения = 0.2):
  if batch_size == None:
    batch_size = int(x_train.shape[0] * 0.01)
  if epochs == None:
    epochs = 10
  filepath="model.h5"
  model_checkpoint_callback = ModelCheckpoint(
    filepath=filepath,
    save_weights_only=True,
    monitor='val_loss',
    mode='min',
    save_best_only=True)
    
  cur_time = time.time()
  def on_epoch_end(epoch, log):
    nonlocal cur_time
    k = list(log.keys())
    p1 = 'Epoch #' + str(epoch+1)
    p2 = p1 + ' '* (10 - len(p1)) + 'Training time: ' + str(round(time.time()-cur_time,2)) + 's'
    p3 = p2 + ' '* (33 - len(p2)) + 'Training accuracy: ' + str(round(log[k[1]]*100,2)) + '%'
    if len(k)>2:
        p4 = p3 + ' '* (77 - len(p3)) + 'Validation accuracy: ' + str(round(log[k[3]]*100,2)) + '%'
        print(p4)
    else:
        print(p3)
    cur_time = time.time()
  def on_epoch_begin(epoch, log):
    nonlocal cur_time
    cur_time = time.time()
  myCB = LambdaCallback(on_epoch_end = on_epoch_end, on_epoch_begin=on_epoch_begin)
  

  if len(x_test)==0:
    model_checkpoint_callback = ModelCheckpoint(
        filepath=filepath,
        save_weights_only=True,
        monitor='loss',
        mode='min',
        save_best_only=True)
    history = модель.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, callbacks=[model_checkpoint_callback, myCB], verbose = 0)
  else:
    history = модель.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data = (x_test, y_test), callbacks=[model_checkpoint_callback, myCB], verbose = 0)
  модель.load_weights('model.h5')
  модель.save('model_s.h5')
  plt.figure(figsize=(12, 6)) # Create the figure for visualization
  keys = list(history.history.keys())
  plt.plot(history.history[keys[0]], label='Training set') # Plot the training loss curve
  if len(keys)>2:
    plt.plot(history.history['val_'+keys[0]], label='Validation set') # Plot the validation loss curve
  plt.legend() # Show the legend
  plt.title('Training loss') # Set the plot title
  plt.show()
  plt.figure(figsize=(12,6)) # Create the figure for visualization
  plt.plot(history.history[keys[1]], label='Training set') # Plot the training accuracy curve
  if len(keys)>2:
    plt.plot(history.history['val_'+keys[1]], label='Validation set') # Plot the validation accuracy curve
  plt.legend() # Show the legend
  plt.title('Training accuracy') # Set the plot title
  plt.show()
Example #20
def vae_custom_loss(z, beta_fn, free_bits=0, kl_weight=1, run=None):
    """Function for getting function to calculate kl loss + xent loss
    
    Arguments:
    z -- list containing [z_mean, z_log_sigma]
    beta_fn -- function of the epoch number that returns beta, for updating beta in a Keras callback, OR an int/float
    free_bits -- allowance of free bits before the KL loss starts impacting the loss
    kl_weight -- weight to give to the KL part of the loss
    
    Returns:
    vae_loss -- function for evaluating loss
    beta_cb -- keras callback for updating beta

    Notes:
    Made with wonderful help of this: https://blog.keras.io/building-autoencoders-in-keras.html
    I had trouble with this function - I was using it with a sampling function that produced z_mean
    and z_LOG_sigma, rather than z_sigma, and no activation on those in the dense layers that produced them.


    """

    # for implementation of free_nats, see https://github.com/tensorflow/magenta/blob/master/magenta/models/music_vae/base_model.py
    free_nats = free_bits * tf.math.log(2.0)
    # for implementation of weighting kl loss according to epoch, see https://stackoverflow.com/questions/42787181/variationnal-auto-encoder-implementing-warm-up-in-keras/
    # beta is used for this
    if isinstance(beta_fn, (int, float)):
        beta = tf.keras.backend.variable(value=beta_fn)
    else:
        beta = tf.keras.backend.variable(value=0.0)
    z_mean, z_log_sigma = z
    # see https://stackoverflow.com/questions/42787181/variationnal-auto-encoder-implementing-warm-up-in-keras/

    beta_cb = None

    if not isinstance(beta_fn, (int, float)):

        def update_beta(epoch):
            """function for updating beta value according to epoch number"""
            value = beta_fn(epoch)
            print("beta:", value)
            K.set_value(beta, value)
            if run != None:
                run.log_scalar("beta", float(value))

        beta_cb = LambdaCallback(
            on_epoch_begin=lambda epoch, log: update_beta(epoch))

    def vae_loss(y_true, y_pred):
        xent_loss = tf.keras.losses.categorical_crossentropy(y_true, y_pred)
        kl_loss = tf.maximum(
            kl_weight *
            (-0.5 *
             K.mean(1 + z_log_sigma - K.square(z_mean) - K.exp(z_log_sigma),
                    axis=-1)) - free_nats, 0)
        # need this for training on batches, see here: https://github.com/keras-team/keras/issues/10155
        kl_loss = K.mean(kl_loss)
        return beta * kl_loss + xent_loss

    return vae_loss, beta_cb
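
A hypothetical way to wire vae_custom_loss() into training, with a linear beta warm-up over the first 10 epochs; the vae model, the z_mean/z_log_sigma tensors from the encoder, and the training arrays are assumptions for illustration.

# Hypothetical usage: vae, z_mean, z_log_sigma, x_train and y_train are assumed to exist.
loss_fn, beta_cb = vae_custom_loss([z_mean, z_log_sigma],
                                   beta_fn=lambda epoch: min(1.0, epoch / 10.0),
                                   free_bits=0.25,
                                   kl_weight=1)
vae.compile(optimizer="adam", loss=loss_fn)
callbacks = [beta_cb] if beta_cb is not None else []
vae.fit(x_train, y_train, epochs=50, batch_size=32, callbacks=callbacks)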
Example #21
 def fit_model(self, x, y, batch_size, epochs):
     print("\nT R A I N I N G - M O D E L\n")
     print_callback = LambdaCallback(on_epoch_end=self.on_epoch_end)
     self.model.fit(x,
                    y,
                    batch_size=batch_size,
                    epochs=epochs,
                    callbacks=[print_callback])
Example #22
    def find(self,
             trainData,
             startLR,
             endLR,
             epochs=None,
             stepsPerEpoch=None,
             batchSize=32,
             sampleSize=2048,
             verbose=1):

        self.reset()

        useGen = self.is_data_iter(trainData)

        if useGen and stepsPerEpoch is None:
            msg = "Using generator without supplying stepsPerEpoch"
            raise Exception(msg)

        elif not useGen:
            numSamples = len(trainData[0])
            stepsPerEpoch = np.ceil(numSamples / float(batchSize))

        if epochs is None:
            epochs = int(np.ceil(sampleSize / float(stepsPerEpoch)))

        numBatchUpdates = epochs * stepsPerEpoch

        self.lrMult = (endLR / startLR)**(1.0 / numBatchUpdates)

        self.weightsFile = tempfile.mkstemp()[1]
        self.model.save_weights(self.weightsFile)

        origLR = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, startLR)

        callback = LambdaCallback(
            on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        if useGen:
            self.model.fit_generator(trainData,
                                     steps_per_epoch=stepsPerEpoch,
                                     epochs=epochs,
                                     verbose=verbose,
                                     callbacks=[callback])
        else:

            self.model.fit(trainData[0],
                           trainData[1],
                           batch_size=batchSize,
                           epochs=epochs,
                           callbacks=[callback],
                           verbose=verbose)

        self.model.load_weights(self.weightsFile)
        K.set_value(self.model.optimizer.lr, origLR)
Example #23
    def find(self, trainData, startLR, endLR, epochs=None, stepsPerEpoch=None, batchSize=32,
             sampleSize=2048, classWeight=None, verbose=1):
        self.reset()

        # determine if we are using a data generator or not
        useGen = self.is_data_iter(trainData)
        # if we're using a generator and the steps per epoch is not supplied, raise an error
        if useGen and stepsPerEpoch is None:
            msg = "Using generator without supplying stepsPerEpoch"
            raise Exception(msg)
        # if we're not using a generator then our entire dataset must already be in memory
        elif not useGen:
            # grab the number of samples in the training data and then derive the number of steps per epoch
            numSamples = len(trainData[0])
            stepsPerEpoch = np.ceil(numSamples / float(batchSize))

        # if no number of training epochs are supplied, compute the training epochs based on a default sample size
        if epochs is None:
            epochs = int(np.ceil(sampleSize / float(stepsPerEpoch)))

        # compute the total number of batch updates that will take place 
        # while we are attempting to find a good starting learning rate
        numBatchUpdates = epochs * stepsPerEpoch
        # derive the learning rate multiplier based on the ending learning rate,
        # starting learning rate, and total number of batch updates
        self.lrMult = (endLR / startLR) ** (1.0 / numBatchUpdates)

        # create a temporary file path for the model weights and then save the weights
        # (so we can reset the weights when we are done)
        self.weightsFile = tempfile.mkstemp()[1]
        self.model.save_weights(self.weightsFile)

        # grab the *original* learning rate (so we can reset it
        # later), and then set the *starting* learning rate
        origLR = K.get_value(self.model.optimizer.lr)
        K.set_value(self.model.optimizer.lr, startLR)

        # construct a callback that will be called at the end of each batch,
        # enabling us to increase our learning rate as training progresses
        callback = LambdaCallback(on_batch_end=lambda batch, logs: self.on_batch_end(batch, logs))

        # check to see if we are using a data iterator
        if useGen:
            self.model.fit_generator(trainData, steps_per_epoch=stepsPerEpoch, epochs=epochs,
                                     class_weight=classWeight, verbose=verbose, callbacks=[callback])
        # otherwise, our entire training data is already in memory
        else:
            # train our model using Keras' fit method
            self.model.fit(trainData[0], trainData[1], batch_size=batchSize, epochs=epochs,
                           class_weight=classWeight, callbacks=[callback], verbose=verbose)

        # restore the original model weights and learning rate
        self.model.load_weights(self.weightsFile)
        K.set_value(self.model.optimizer.lr, origLR)
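
The learning-rate finders in these examples record state in on_batch_end but never show how the result is inspected. A minimal companion plotting method, assuming matplotlib and the self.lrs / self.losses lists named in Example #1:

    def plot_loss(self, skip_begin=10, skip_end=1):
        # Plot smoothed loss against learning rate on a log x-axis,
        # trimming the noisy first and last few batches.
        import matplotlib.pyplot as plt
        lrs = self.lrs[skip_begin:-skip_end]
        losses = self.losses[skip_begin:-skip_end]
        plt.plot(lrs, losses)
        plt.xscale("log")
        plt.xlabel("Learning rate (log scale)")
        plt.ylabel("Loss")
        plt.show()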
Example #24
def run(
    train_files,
    batch_size,
    epochs,
    steps_per_epoch,
    learning_rate,
    learning_rate_decay,
    layers,
    rnn_sequence_length,
    dropout_pdrop,
    predict_length,
    export_dir,
    job_dir,
):
    model = create_model(layers, VOCAB_SIZE, learning_rate, learning_rate_decay, batch_size, dropout_pdrop)
    print(model.summary())

    def on_epoch_end(epoch, logs):
        c = "S"

        print("\n", end="")

        for _ in range(predict_length):
            print(c, end="")
            inp = np.zeros([batch_size, 1, VOCAB_SIZE])
            for i in range(batch_size):
                inp[i][0][encode(ord(c))] = 1.0
            prob = model.predict(inp, batch_size=batch_size)
            prob = np.reshape(prob, [batch_size, VOCAB_SIZE])
            prob = np.sum(prob, axis=0)
            rc = sample(prob)
            c = chr(decode(rc))

        print("\n")

    train_iterator = create_iterator(train_files, batch_size, rnn_sequence_length, VOCAB_SIZE, True)
    eval_iterator = create_iterator(train_files, batch_size, rnn_sequence_length, VOCAB_SIZE, True)

    model.fit(
        train_iterator,
        shuffle=False,
        steps_per_epoch=steps_per_epoch,
        epochs=epochs,
        validation_data=eval_iterator,
        validation_steps=1,
        callbacks=[
            TensorBoard(log_dir=job_dir),
            LambdaCallback(on_epoch_end=on_epoch_end),
        ],
    )

    if export_dir is not None:
        model.save("%s/model.h5" % export_dir)
        save_keras_model(model, "%s/web" % export_dir)
Example #25
def metrics_to_csv_logger(file_path, metrics):
    with open(file_path, "w", newline="") as file:
        wr = csv.writer(file, delimiter=";")
        wr.writerow(["batch"] + metrics)

    def callback(batch, logs):
        with open(file_path, "a", newline="") as file:
            wr = csv.writer(file, delimiter=";")
            wr.writerow([batch] + [logs[metric] for metric in metrics])

    return LambdaCallback(on_batch_end=callback)
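
A hypothetical use of metrics_to_csv_logger(); the model, training arrays and the 'accuracy' metric key are assumptions and depend on how the model was compiled.

# Hypothetical usage: model, x_train and y_train are assumed to exist.
logger_cb = metrics_to_csv_logger("batch_metrics.csv", ["loss", "accuracy"])
model.fit(x_train, y_train, epochs=3, batch_size=32, callbacks=[logger_cb])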
Example #26
    def __init__(self, n_features):
        self.n_features = n_features
        self.model = tf.keras.Sequential()
        self.model.add(SFLayer(n_features))

        optimizer = tf.keras.optimizers.Adam(learning_rate=0.01)
        self.model.compile(optimizer=optimizer, loss=L1loss)

        self.weights = []
        self.Wcallback = LambdaCallback(
            on_epoch_end=lambda batch, logs: self.weights.append(
                self.model.layers[0].get_weights()[0]))
Example #27
    def train(self, gen):
        test_batch = next(gen)[0]

        self.model.fit_generator(
            gen,
            epochs=EPOCHS,
            steps_per_epoch=50,
            callbacks=[
                LambdaCallback(
                    on_batch_end=self.on_batch_end,
                    on_epoch_end=lambda *args: self.log_example(gen))
            ])
Example #28
    def train(self, train_data, test_data, output, epoch_size=1):
        
        x_train, y_train = train_data
        x_test, y_test = test_data
        
        callback = LambdaCallback(on_epoch_end=lambda epoch, logs: output((epoch+1)/epoch_size))
 
        self.autoencoder.fit(x_train, y_train, 
                             epochs=epoch_size, 
                             validation_data=(x_test, y_test),
                             batch_size=batch_size,
                             callbacks=[callback])
Example #29
def train_network():
    # Load the training data
    history = load_data()
    xs, y_policies, y_values = zip(*history)

    # Reshape the input data for training
    a, b, c = DN_INPUT_SHAPE
    xs = np.array(xs)
    xs = xs.reshape(len(xs), c, a, b).transpose(0, 2, 3, 1)
    y_policies = np.array(y_policies)
    y_values = np.array(y_values)

    # Load the best player's model
    model = load_model("./model/best.h5")

    # Compile the model
    model.compile(loss=["categorical_crossentropy", "mse"], optimizer="adam")

    # Learning-rate schedule
    def step_decay(epoch):
        x = 0.001
        if epoch >= 50:
            x = 0.0005
        if epoch >= 80:
            x = 0.00025
        return x

    lr_decay = LearningRateScheduler(step_decay)

    # Progress output
    print_callback = LambdaCallback(
        on_epoch_begin=lambda epoch, logs: print(
            "\rTrain {}/{}".format(epoch + 1, RN_EPOCHS), end=""
        )
    )

    # Run training
    model.fit(
        xs,
        [y_policies, y_values],
        batch_size=BATCH_SIZE,
        epochs=RN_EPOCHS,
        verbose=0,
        callbacks=[lr_decay, print_callback],
    )
    print("")

    # Save the latest player's model
    model.save("./model/latest.h5")

    # Discard the model
    K.clear_session()
    del model
Example #30
def get_callbacks(volume_mount_dir, checkpoint_path, checkpoint_names, chars,
                  char_to_ix, train, model, timestamp):
    today_date = datetime.datetime.today().strftime('%Y-%m-%d')
    if not os.path.isdir(checkpoint_path):
        os.makedirs(checkpoint_path)
    filepath = os.path.join(checkpoint_path, checkpoint_names)
    checkpoint_callback = ModelCheckpoint(filepath=filepath,
                                          save_weights_only=False,
                                          monitor='val_loss')
    # Loss history callback
    epoch_results_callback = CSVLogger(os.path.join(
        volume_mount_dir,
        'training_log_{}_{:d}.csv'.format(today_date, timestamp)),
                                       append=True)
    sample_callback = LambdaCallback(
        on_epoch_end=lambda epoch, logs: on_epoch_end(train, epoch, model,
                                                      chars, char_to_ix, logs))

    batch_callback = LambdaCallback(
        on_batch_end=lambda batch, logs: on_batch_end(
            batch, logs, volume_mount_dir, timestamp))

    class SpotTermination(keras.callbacks.Callback):
        def on_batch_begin(self, batch, logs={}):
            try:
                status_code = requests.get(
                    "http://169.254.169.254/latest/meta-data/spot/instance-action"
                ).status_code
                if status_code != 404:
                    time.sleep(150)
            except:
                logger.warning("Request unsuccessful")

    spot_termination_callback = SpotTermination()

    callbacks = [
        checkpoint_callback, epoch_results_callback, sample_callback,
        batch_callback
    ]
    return callbacks