Example #1
import pickle

from keras.callbacks import History


def load_history(name):
    # Load a pickled history dict and wrap it in a fresh History object.
    with open(name + ".pickle", "rb") as f:
        h = History()
        h.history = pickle.load(f)
    return h
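
A minimal round-trip sketch of how this loader is meant to be used, assuming the pickle file holds the plain dict that model.fit() would produce (the file name "training" and the dict contents are illustrative):

import pickle

# Assumption: this dict mirrors what model.fit() would have recorded.
fake_history = {"loss": [0.9, 0.5, 0.3], "val_loss": [1.0, 0.7, 0.6]}
with open("training.pickle", "wb") as f:
    pickle.dump(fake_history, f)

h = load_history("training")  # Example #1's loader
print(h.history["val_loss"])  # [1.0, 0.7, 0.6]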
Example #2
import os
import pickle

from keras.callbacks import History


def plot(his_paths):
    # Plot each saved model history.
    for history_path in his_paths:
        if os.path.exists(history_path):
            # Load the pickled history dict back into a History object.
            with open(history_path, 'rb') as file_pi:
                model_history = History()
                model_history.history = pickle.load(file_pi)
                plot_model_history(model_history)
        else:
            raise ValueError('No model history found: ' + history_path)
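
plot_model_history is not defined in this example; a minimal matplotlib sketch of what such a helper could look like (the helper itself is an assumption, not part of the original code):

import matplotlib.pyplot as plt

def plot_model_history(model_history):
    # Plot every metric recorded in the history over the epochs.
    for key, values in model_history.history.items():
        plt.plot(values, label=key)
    plt.xlabel('epoch')
    plt.legend()
    plt.show()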
Example #3
    def fit_generator_manual(self,
                             train_datagen,
                             nb_train_samples,
                             nb_epochs,
                             val_datagen,
                             nb_val_samples,
                             verbose=1):
        # Manual Mode
        loss, val_loss = [], []
        for e in range(nb_epochs):
            batch_loss, batch_val_loss = [], []
            train_batch_counter, val_batch_counter = 0, 0
            nb_training_batches = nb_train_samples // BATCH_SIZE
            nb_val_batches = nb_val_samples // BATCH_SIZE

            # Training
            with tqdm(total=nb_train_samples, desc='Training Samples') as pbar:
                for X_batch, y_batch in train_datagen:
                    if ZERO_PENALIZING:
                        X_batch, y_batch = self.zero_penalize(e, train_datagen)
                    batch_loss.append(
                        self.model.train_on_batch(X_batch, y_batch))
                    pbar.update(BATCH_SIZE)
                    train_batch_counter += 1
                    if train_batch_counter >= nb_training_batches:
                        break

            # Validation
            for X_batch, y_batch in val_datagen:
                batch_val_loss.append(
                    self.model.test_on_batch(X_batch, y_batch))
                val_batch_counter += 1
                if val_batch_counter >= nb_val_batches:
                    break

            loss.append(sum(batch_loss) / float(len(batch_loss)))
            val_loss.append(sum(batch_val_loss) / float(len(batch_val_loss)))

            if verbose == 1:
                print(
                    'Manual Fit. Epoch {:02d}/{:02d}: loss: {:>8.1f} - val_loss {:>8.1f}'
                    .format(e + 1, nb_epochs, loss[e], val_loss[e]))

        history = History()
        history.history = {'loss': loss, 'val_loss': val_loss}

        if ZERO_PENALIZING:
            history.history['filtered_y'] = self.filtered_y

        return history
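
For comparison, the built-in Keras equivalent of this manual loop (without the ZERO_PENALIZING hook and the custom progress reporting) would be a single fit_generator call inside the same class; a sketch assuming the same globals:

# Rough built-in equivalent of the manual loop above.
history = self.model.fit_generator(
    train_datagen,
    steps_per_epoch=nb_train_samples // BATCH_SIZE,
    epochs=nb_epochs,
    validation_data=val_datagen,
    validation_steps=nb_val_samples // BATCH_SIZE,
    verbose=verbose)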
Example #4
def concatenate_history(h):
    """Concatenate History objects.

    Useful for layers fine tuning.

    Parameters:
    --------------
    h: list
        List of history objects to concatenate.

    Returns:
    --------------
    history: `keras.callbacks.History`
        The resulting `History` object.
    """
    h0, h1 = h[0], h[1]
    history = History()
    history.history = {}
    history.epoch = h0.epoch + h1.epoch

    for k in h0.history.keys():
        history.history[k] = h0.history[k] + h1.history[k]

    return history
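
A typical fine-tuning usage sketch, assuming model is a compiled Keras model whose base is trained frozen first and then unfrozen (model, x_train, and y_train are illustrative names, not part of the original code):

# Phase 1: train with the base frozen; phase 2: unfreeze and fine-tune.
h_frozen = model.fit(x_train, y_train, epochs=5, validation_split=0.1)
for layer in model.layers:
    layer.trainable = True
model.compile(optimizer='adam', loss='categorical_crossentropy')
h_finetune = model.fit(x_train, y_train, epochs=5, validation_split=0.1)

full = concatenate_history([h_frozen, h_finetune])
print(len(full.history['loss']))  # 10 epochs of loss values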
Example #5
def resn(inputShape, outputSize, depthMapShape, type='resnet34'):
    outName = type + '_synth_and_real_white_cloth_y_{nov}_range1'.format(
        nov=numOutputVertices)

    # Save data parameters
    fileBestModel = outName + '_model.h5'
    fileHistory = outName + '_history.pkl'
    fileWeights = outName + '_weights.h5'

    pathW = os.path.join(ldPaths['weights'], fileWeights)
    pathM = os.path.join(ldPaths['models'], fileBestModel)
    pathH = os.path.join(ldPaths['histories'], fileHistory)

    # Create model.
    logging.info('Creating model {t}.'.format(t=type))
    modelSynth = resnet.createModel(inputShape,
                                    outputSize,
                                    type=type,
                                    name='synth')
    modelReal = Model([modelSynth.input], [modelSynth.output], name='real')

    optimizerSynth = adam(lr=lr)
    optimizerReal = adam(lr=lr)

    modelSynth.compile(loss=meanVertexPairEuclideanDistanceTF,
                       optimizer=optimizerSynth)

    depthMSE = getDepthMSEObjective(depthMapShape, A, C)
    # depthMSE = depthMSE_TEST

    modelReal.compile(loss=depthMSE, optimizer=optimizerReal)

    # Save model.
    # if flagSaveModel:
    #     logging.info('Saving model: {}'.format(pathM))
    #     saveModel(pathM, model)

    # Create history.
    history = History()
    history.history = {
        'loss_synth': [],
        'loss_real': [],
        'val_loss_synth': [],
        'val_loss_real': []
    }
    history.model = None
    modelSynth.history = history

    # Callbacks
    hSaver = HistorySaver(fileName=pathH, period=1)
    wSaver = WeightsSaver(fileName=pathW, period=1)
    hSaver.model = modelSynth
    wSaver.model = modelSynth

    logging.info('Training model.')

    # Training loop
    numBatchesRealTr = int(np.ceil(NRealTr / batchSize))
    numBatchesSynthVa = int(np.ceil(NSynthVa / batchSize))
    numBatchesRealVa = int(np.ceil(NRealVa / batchSize))
    for ep in range(epochs):
        print('Epoch {ep}'.format(ep=ep + 1))

        tStart = timer()
        for it in range(numBatchesRealTr):
            print('Batch: {b}/{tb}.'.format(b=it + 1, tb=numBatchesRealTr),
                  end=' ')

            # Get next synth batch.
            genOutputSynth = loadBatch(queSynthTr, _stopSynthTr)
            batchXtrSynth = genOutputSynth[0]
            batchYtrSynth = genOutputSynth[1]

            # Train on 1 synth batch.
            lossTrSynth = float(
                modelSynth.train_on_batch(batchXtrSynth, batchYtrSynth))

            # Get next real batch.
            genOutputReal = loadBatch(queRealTr, _stopRealTr)
            batchXtrReal = genOutputReal[0]
            batchYtrReal = genOutputReal[1]

            # Train on 1 real batch.
            lossTrReal = float(
                modelReal.train_on_batch(batchXtrReal, batchYtrReal))

            print('Training loss (synth/real): {s:.4f}/{r:.4f}.'.format(
                s=lossTrSynth, r=lossTrReal),
                  end=' ')
            # '\r' overwrites the line until the last batch of the epoch.
            print('t: {:0.2f} s.'.format(timer() - tStart),
                  end=('\r', ' ')[it == numBatchesRealTr - 1])

        # Validation for synth data.
        lossVaSynth = 0.0
        for it in range(numBatchesSynthVa):
            # Get next batch.
            genOutput = loadBatch(queSynthVa, _stopSynthVa)
            batchXva = genOutput[0]
            batchYva = genOutput[1]

            # Validate on 1 batch.
            lossVaSynth += batchXva.shape[0] * \
                float(modelSynth.evaluate(batchXva, batchYva, verbose=0))
        lossVaSynth /= NSynthVa

        # Validation for real data.
        lossVaReal = 0.0
        for it in range(numBatchesRealVa):
            # Get next batch.
            genOutput = loadBatch(queRealVa, _stopRealVa)
            batchXva = genOutput[0]
            batchYva = genOutput[1]

            # Validate on 1 batch.
            lossVaReal += batchXva.shape[0] * \
                float(modelReal.evaluate(batchXva, batchYva, verbose=0))
        lossVaReal /= NRealVa

        print('Validation loss (synth/real): {s:.4f}/{r:.4f}'.format(
            s=lossVaSynth, r=lossVaReal))

        # Update history (training entries record the last batch's loss).
        history.history['loss_synth'].append(lossTrSynth)
        history.history['loss_real'].append(lossTrReal)
        history.history['val_loss_synth'].append(lossVaSynth)
        history.history['val_loss_real'].append(lossVaReal)

        # Save weights, history.
        if flagSaveHistory:
            hSaver.on_epoch_end(ep)
        if flagSaveWeights:
            wSaver.on_epoch_end(ep)
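
The history pickled by HistorySaver at pathH can later be restored the same way as in Example #1, assuming the callback dumps the raw history dict (the callback itself is custom and not shown here):

import pickle

from keras.callbacks import History

def load_saved_history(path):
    # Assumption: HistorySaver pickles history.history as a plain dict.
    with open(path, 'rb') as f:
        h = History()
        h.history = pickle.load(f)
    return h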