def trainingLoopAE(obj, model, num_epoch, train_dataloader, criterion,
                   optimizer, loss_dict, path, device):
    """
    A universal training loop to optimize any loss function

    Args:
    
    obj       : Object on which the model is to be trained
    model     : Architecture of the neural network
    num_epoch : number of training epochs
    criterion : loss function to optimize
    train_dataloader : dataloader for the training set
    optimizer : optimizer used to minimize the loss
    loss_dict : dictionary used to track the loss per epoch
    path      : directory where model checkpoints are stored
    device    : GPU or CPU device
    """

    # Check whether the checkpoint directory exists; if not, create it
    if not os.path.exists(path):
        os.makedirs(path)

    curr_epoch = currEpoch(path)
    epochs_left = num_epoch - curr_epoch

    # Load Model
    if curr_epoch != -1:
        model.load_state_dict(torch.load(path + '/' + str(curr_epoch)))
        # print(model)
        model.eval()

    for epoch in range(epochs_left):  # loop over the dataset multiple times
        running_loss = 0.0

        for data in train_dataloader:
            data = data.to(device)

            # zero the parameter gradients
            optimizer.zero_grad()

            # forward + backward + optimize
            outputs = model(data)
            loss = criterion(outputs, data)
            loss.backward()
            optimizer.step()

            # print statistics
            running_loss += loss.item()

        loss_dict[model.name].append(running_loss)

        if epoch % 50 == 0:  # plot and checkpoint every 50 epochs
            plotLoss(loss_dict, model.name)
            plotPointCloud(obj, model)
            saveModel(path, model, epoch)
            print(epoch)
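The loop relies on a currEpoch helper that is not shown here. A minimal sketch, assuming checkpoints are saved under path with the epoch number as the file name (as the torch.load call above suggests):

import os

def currEpoch(path):
    # Hypothetical helper: return the latest saved epoch number in the
    # checkpoint directory, or -1 if no checkpoint has been written yet.
    epochs = [int(f) for f in os.listdir(path) if f.isdigit()]
    return max(epochs) if epochs else -1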
Example 2
def train(epochs=1, batchSize=128):
    batchCount = X_train.shape[0] // batchSize
    print('[INIT] Epochs:', epochs)
    print('[INIT] Batch size:', batchSize)
    print('[INIT] Batches per epoch:', batchCount)

    for e in range(1, epochs + 1):
        print('-' * 15, 'Epoch %d' % e, '-' * 15)
        for _ in tqdm(range(batchCount)):
            # Get a random set of input noise and images
            noise = np.random.normal(0, 1, size=[batchSize, LATENT_SPACE_DIM])
            imageBatch = X_train[np.random.randint(0,
                                                   X_train.shape[0],
                                                   size=batchSize)]

            # Generate fake MNIST images
            generatedImages = generator.predict(noise)
            X = np.concatenate([imageBatch, generatedImages])

            # Labels for generated and real data
            yDis = np.zeros(2 * batchSize)
            # One-sided label smoothing
            yDis[:batchSize] = 0.9

            # Train discriminator
            discriminator.trainable = True
            dloss = discriminator.train_on_batch(X, yDis)

            # Train generator
            noise = np.random.normal(0, 1, size=[batchSize, LATENT_SPACE_DIM])
            yGen = np.ones(batchSize)
            discriminator.trainable = False
            gloss = gan.train_on_batch(noise, yGen)

        # Store loss of most recent batch from this epoch
        dLosses.append(dloss)
        gLosses.append(gloss)

        if e == 1 or e % 5 == 0:
            plotGeneratedImages(e, LATENT_SPACE_DIM)
            saveModels(e, generator, discriminator)

    # Plot losses from every epoch
    plotLoss(e, dLosses, gLosses)
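This snippet assumes a gan model that chains the generator into a frozen discriminator, so that gan.train_on_batch updates only the generator. A minimal sketch of how such a stack is commonly assembled in Keras (the layer sizes and learning rates below are assumptions, not taken from the original):

from tensorflow.keras import layers, models, optimizers

LATENT_SPACE_DIM = 100  # assumed latent dimension

generator = models.Sequential([
    layers.Dense(256, activation='relu', input_dim=LATENT_SPACE_DIM),
    layers.Dense(784, activation='tanh'),  # flattened 28x28 MNIST image
])

discriminator = models.Sequential([
    layers.Dense(256, activation='relu', input_dim=784),
    layers.Dense(1, activation='sigmoid'),
])
discriminator.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(1e-4))

# Freeze the discriminator inside the combined model so that
# gan.train_on_batch only updates the generator weights.
discriminator.trainable = False
gan = models.Sequential([generator, discriminator])
gan.compile(loss='binary_crossentropy', optimizer=optimizers.Adam(1e-4))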
Example 3

        LossHist = OpenPickle('Loss', 'Loss_{}'.format(j))


        # write score to pickle
        SaveToPickle(score_history,'Score', 'Score_{}'.format(j))

        # read pickle
        ScoreHist = OpenPickle('Score','Score_{}'.format(j))

        #PLOT

        # plot learning
        filename = r'Plots\Average\Evolving_Average{}.png'.format(j)
        plotLearning(ScoreHist, filename, window=100)

        # plot loss
        filename2 = r'Plots\Loss\Evolving_Loss{}.png'.format(j)
        plotLoss(LossHist, filename2)

        #Plot scores for each game in one CV

        filename3 = r'Plots\Score\Game_Scores{}.png'.format(j)
        plotScore(n_episodes, score_j, filename3)

# write parameters to pickle
SaveToPickle(parameters, 'Features', 'Parameters')


SaveToPickle(scores_mat, 'Features', 'Matrix_scores')
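SaveToPickle and OpenPickle are not defined in this excerpt; a minimal sketch of helpers matching the call signature used above (a folder name plus a file name), assuming plain pickle files on disk:

import os
import pickle

def SaveToPickle(obj, folder, name):
    # Serialize obj to <folder>/<name>.pkl, creating the folder if needed.
    os.makedirs(folder, exist_ok=True)
    with open(os.path.join(folder, name + '.pkl'), 'wb') as f:
        pickle.dump(obj, f)

def OpenPickle(folder, name):
    # Load and return the object stored in <folder>/<name>.pkl.
    with open(os.path.join(folder, name + '.pkl'), 'rb') as f:
        return pickle.load(f)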

Example 4
def training(trainmatrices, trainchromatinpaths, trainchromosomes,
             validationmatrices, validationchromatinpaths,
             validationchromosomes, sequencefile, outputpath, modelfilepath,
             learningrate, numberepochs, batchsize, recordsize, windowsize,
             flankingsize, maxdist, scalematrix, clampfactors, scalefactors,
             binsizefactors, modeltype, scoreweight, scoresize, tvweight,
             structureweight, perceptionweight, optimizer, loss,
             pixellossweight, earlystopping, debugstate, figuretype, savefreq,
             flipsamples):
    #save the input parameters so they can be written to csv later
    paramDict = locals().copy()

    if debugstate is not None and debugstate != "Figures":
        debugstate = int(debugstate)

    if maxdist is not None:
        maxdist = min(windowsize, maxdist)
        paramDict["maxdist"] = maxdist

    if flankingsize is None:
        flankingsize = windowsize
        paramDict["flankingsize"] = flankingsize

    if scoresize is None and scoreweight > 0.0:
        scoresize = int(windowsize * 0.25)
        paramDict["scoresize"] = scoresize

    #workaround for AlreadyExistsException when using perception loss
    #root cause seems to be a bug in grappler?
    if perceptionweight > 0.0:
        tf.config.optimizer.set_experimental_options(
            {"arithmetic_optimization": False})

    trainChromNameList = trainchromosomes.rstrip().split(" ")
    trainChromNameList = sorted([x.lstrip("chr") for x in trainChromNameList])

    validationChromNameList = validationchromosomes.rstrip().split(" ")
    validationChromNameList = sorted(
        [x.lstrip("chr") for x in validationChromNameList])

    #number of train matrices must match number of chromatin paths
    #this is useful for training on matrices and chromatin factors
    #from different cell lines
    if len(trainmatrices) != len(trainchromatinpaths):
        msg = "Number of train matrices and chromatin paths must match\n"
        msg += "Current numbers: Matrices: {:d}; Chromatin Paths: {:d}"
        msg = msg.format(len(trainmatrices), len(trainchromatinpaths))
        raise SystemExit(msg)
    if len(validationmatrices) != len(validationchromatinpaths):
        msg = "Number of validation matrices and chromatin paths must match\n"
        msg += "Current numbers: Matrices: {:d}; Chromatin Paths: {:d}"
        msg = msg.format(len(validationmatrices),
                         len(validationchromatinpaths))
        raise SystemExit(msg)
    if len(binsizefactors) != len(trainchromatinpaths) + len(validationchromatinpaths):
        msg = "--binsizeFactors/-bsf must be specified for each chromatin path"
        raise SystemExit(msg)
    #binsize list - we have one traindata container per chrom and per matrix/chromatin path
    #container order is chromosomes first
    binsize_train_list = binsizefactors[:len(trainchromatinpaths)] * len(
        trainChromNameList)
    binsize_val_list = binsizefactors[len(trainchromatinpaths):] * len(
        validationChromNameList)

    #check if chosen model type matches inputs
    modelTypeStr = checkSetModelTypeStr(modeltype, sequencefile)
    paramDict["modeltype"] = modelTypeStr

    #select the correct class for the data container
    containerCls = dataContainer.DataContainer

    #prepare the training data containers. No data is loaded yet.
    traindataContainerList = []
    for chrom in trainChromNameList:
        for matrix, chromatinpath in zip(trainmatrices, trainchromatinpaths):
            container = containerCls(chromosome=chrom,
                                     matrixfilepath=matrix,
                                     chromatinFolder=chromatinpath,
                                     sequencefilepath=sequencefile,
                                     mode="training")
            traindataContainerList.append(container)

    #prepare the validation data containers. No data is loaded yet.
    validationdataContainerList = []
    for chrom in validationChromNameList:
        for matrix, chromatinpath in zip(validationmatrices,
                                         validationchromatinpaths):
            container = containerCls(chromosome=chrom,
                                     matrixfilepath=matrix,
                                     chromatinFolder=chromatinpath,
                                     sequencefilepath=sequencefile,
                                     mode="validation")
            validationdataContainerList.append(container)

    #define the load params for the containers
    loadParams = {
        "scaleFeatures": scalefactors,
        "clampFeatures": clampfactors,
        "scaleTargets": scalematrix,
        "windowsize": windowsize,
        "flankingsize": flankingsize,
        "maxdist": maxdist
    }
    #now load the data and write TFRecords, one container at a time.
    if len(traindataContainerList) == 0:
        msg = "Exiting. No data found"
        print(msg)
        return  #nothing to do
    container0 = traindataContainerList[0]
    tfRecordFilenames = []
    nr_samples_list = []
    for container, feat_binsize in zip(
            traindataContainerList + validationdataContainerList,
            binsize_train_list + binsize_val_list):
        loadParams["featureBinsize"] = feat_binsize
        container.loadData(**loadParams)
        if not container0.checkCompatibility(container):
            msg = "Aborting. Incompatible data"
            raise SystemExit(msg)
        tfRecordFilenames.append(
            container.writeTFRecord(pOutfolder=outputpath,
                                    pRecordSize=recordsize))
        if debugstate is not None:
            if isinstance(debugstate, int):
                idx = debugstate
            else:
                idx = None
            container.plotFeatureAtIndex(idx=idx,
                                         outpath=outputpath,
                                         figuretype=figuretype)
            container.saveMatrix(outputpath=outputpath, index=idx)
        nr_samples_list.append(container.getNumberSamples())
    traindataRecords = [
        item for sublist in tfRecordFilenames[0:len(traindataContainerList)]
        for item in sublist
    ]
    validationdataRecords = [
        item for sublist in tfRecordFilenames[len(traindataContainerList):]
        for item in sublist
    ]

    #different binsizes are ok, as long as no sequence is used
    #not clear which binsize to use for prediction when they differ during training.
    #For now, store the max.
    binsize = max(
        [container.matrix_binsize for container in traindataContainerList])
    paramDict["binsize"] = binsize
    feature_binsize = max(
        [container.feature_binsize for container in traindataContainerList])
    paramDict["feature_binsize"] = feature_binsize
    #because of compatibility checks above,
    #the following properties are the same with all containers,
    #so just use data from first container
    nr_factors = container0.nr_factors
    paramDict["nr_factors"] = nr_factors
    for i in range(nr_factors):
        paramDict["chromFactor_" + str(i)] = container0.factorNames[i]
    sequenceSymbols = container0.sequenceSymbols
    nr_symbols = None
    if isinstance(sequenceSymbols, set):
        nr_symbols = len(sequenceSymbols)
    nr_trainingSamples = sum(nr_samples_list[0:len(traindataContainerList)])
    storedFeaturesDict = container0.storedFeatures

    #unload the data, it is no longer required and consumes memory
    for container in traindataContainerList + validationdataContainerList:
        container.unloadData()

    #build the requested model
    model = models.buildModel(pModelTypeStr=modelTypeStr,
                              pWindowSize=windowsize,
                              pBinSizeInt=binsize,
                              pNrFactors=nr_factors,
                              pNrSymbols=nr_symbols,
                              pFlankingSize=flankingsize,
                              pMaxDist=maxdist,
                              pBinsizeFactor=container0.matrix_binsize //
                              container0.feature_binsize)
    #define optimizer
    kerasOptimizer = models.getOptimizer(pOptimizerString=optimizer,
                                         pLearningrate=learningrate)

    #build and print the model
    model.build(input_shape=(windowsize + 2 * flankingsize, nr_factors))
    model.summary()

    #writers for tensorboard
    traindatapath = os.path.join(outputpath, "train/")
    validationDataPath = os.path.join(outputpath, "validation/")
    if os.path.exists(traindatapath):
        for f in os.listdir(traindatapath):
            os.remove(os.path.join(traindatapath, f))
    if os.path.exists(validationDataPath):
        for f in os.listdir(validationDataPath):
            os.remove(os.path.join(validationDataPath, f))
    summary_writer_train = tf.summary.create_file_writer(traindatapath)
    summary_writer_val = tf.summary.create_file_writer(validationDataPath)

    #save the training parameters to a file before starting to train
    #(allows recovering the parameters even if training is aborted
    # and only intermediate models are available)
    parameterFile = os.path.join(outputpath, "trainParams.csv")
    with open(parameterFile, "w") as csvfile:
        dictWriter = csv.DictWriter(csvfile,
                                    fieldnames=sorted(list(paramDict.keys())))
        dictWriter.writeheader()
        dictWriter.writerow(paramDict)

    #plot the model using workaround from tensorflow issue #38988
    modelPlotName = "model.{:s}".format(figuretype)
    modelPlotName = os.path.join(outputpath, modelPlotName)
    model._layers = [
        layer for layer in model._layers
        if isinstance(layer, tf.keras.layers.Layer)
    ]  #workaround for plotting with custom loss functions
    tf.keras.utils.plot_model(model, show_shapes=True, to_file=modelPlotName)

    #build input streams
    mirror_idxs = records.get_mirror_indices(windowsize)
    shuffleBufferSize = 3 * recordsize
    trainDs = tf.data.TFRecordDataset(
        traindataRecords,
        num_parallel_reads=tf.data.experimental.AUTOTUNE,
        compression_type="GZIP")
    trainDs = trainDs.map(
        lambda x: records.parse_function(x, storedFeaturesDict),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    if flipsamples:
        tds_mirrored = trainDs.map(lambda a, b: records.mirror_function(
            a["factorData"], b["out_matrixData"], mirror_idxs))
        if debugstate == "Figures":
            tk1 = list(trainDs.take(1).as_numpy_iterator())
            tk2 = list(tds_mirrored.take(1).as_numpy_iterator())
            tk1_facs = tk1[0][0]["factorData"]
            tk1_mat = np.zeros((windowsize, windowsize))
            tk1_mat[np.triu_indices(windowsize)] = tk1[0][1]["out_matrixData"]
            tk2_facs = tk2[0][0]["factorData"]
            tk2_mat = np.zeros_like(tk1_mat)
            tk2_mat[np.triu_indices(windowsize)] = tk2[0][1]["out_matrixData"]
            fig1, axs1 = plt.subplots(2, 2)
            m1 = axs1[0, 0].imshow(np.log(tk1_mat + 1))
            m2 = axs1[0, 1].imshow(np.log(tk2_mat + 1))
            m3 = axs1[1, 0].imshow(tk1_facs, aspect="auto")
            m4 = axs1[1, 1].imshow(tk2_facs, aspect="auto")
            t1 = axs1[1, 0].set_title("standard")
            t2 = axs1[1, 1].set_title("flipped")
            axs1[1, 0].xaxis.set_visible(False)
            axs1[1, 1].xaxis.set_visible(False)
            flipfigname = "flippedVsStd.{:s}".format(figuretype)
            flipfigname = os.path.join(outputpath, flipfigname)
            fig1.savefig(flipfigname)
            plt.close(fig1)
            del axs1, fig1, flipfigname, m1, m2, m3, m4, t1, t2
            del tk1, tk1_facs, tk1_mat, tk2, tk2_facs, tk2_mat
        trainDs = trainDs.concatenate(tds_mirrored)
    trainDs = trainDs.shuffle(buffer_size=shuffleBufferSize,
                              reshuffle_each_iteration=True)
    trainDs = trainDs.batch(batchsize, drop_remainder=True)
    trainDs = trainDs.prefetch(tf.data.experimental.AUTOTUNE)
    validationDs = tf.data.TFRecordDataset(
        validationdataRecords,
        num_parallel_reads=tf.data.experimental.AUTOTUNE,
        compression_type="GZIP")
    validationDs = validationDs.map(
        lambda x: records.parse_function(x, storedFeaturesDict),
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    validationDs = validationDs.batch(batchsize)
    validationDs = validationDs.prefetch(tf.data.experimental.AUTOTUNE)

    weights_before = model.layers[1].weights[0].numpy()

    #filename for plots
    lossPlotFilename = "lossOverEpochs.{:s}".format(figuretype)
    lossPlotFilename = os.path.join(outputpath, lossPlotFilename)
    #models for converting predictions as needed
    percLossMod = models.getPerceptionModel(windowsize)
    grayscaleMod = models.getGrayscaleConversionModel(scalingFactor=0.999,
                                                      windowsize=windowsize)
    #get the per-pixel loss function (also used for perception loss and score loss)
    loss_fn = models.getPerPixelLoss(loss)
    #lists to store loss for each epoch
    trainLossList_epochs = []
    valLossList_epochs = []
    #iterate over all epochs and batches in the train/validation datasets
    #compute gradients and update weights accordingly
    samples_per_epoch = int(np.floor(nr_trainingSamples / batchsize))
    if flipsamples:
        samples_per_epoch *= 2
    for epoch in range(numberepochs):
        pbar_batch = tqdm(trainDs, total=samples_per_epoch)
        pbar_batch.set_description("Epoch {:05d}".format(epoch + 1))
        trainLossList_batches = []  #lists to store loss for each batch
        for x, y in pbar_batch:
            lossVal = trainStep(creationModel=model,
                                grayscaleConversionModel=grayscaleMod,
                                factorInputBatch=x,
                                targetInputBatch=y,
                                optimizer=kerasOptimizer,
                                perceptionLossModel=percLossMod,
                                pixelLossWeight=pixellossweight,
                                ssimWeight=structureweight,
                                tvWeight=tvweight,
                                perceptionWeight=perceptionweight,
                                scoreWeight=scoreweight,
                                lossFn=loss_fn)
            trainLossList_batches.append(lossVal)
            pbar_batch.set_postfix({"loss": "{:.4f}".format(lossVal)})
        trainLossList_epochs.append(np.mean(trainLossList_batches))
        trainLossList_batches = []
        with summary_writer_train.as_default():  #pylint: disable=not-context-manager
            tf.summary.scalar('train_loss',
                              trainLossList_epochs[epoch],
                              step=epoch + 1)
        valLossList_batches = []
        for x, y in validationDs:
            val_loss = validationStep(creationModel=model,
                                      factorInputBatch=x,
                                      targetInputBatch=y,
                                      pixelLossWeight=pixellossweight)
            valLossList_batches.append(val_loss)
        valLossList_epochs.append(np.mean(valLossList_batches))
        valLossList_batches = []
        with summary_writer_val.as_default():  #pylint: disable=not-context-manager
            tf.summary.scalar('validation_loss',
                              valLossList_epochs[epoch],
                              step=epoch + 1)
        #plot loss and save figure every savefreq epochs
        if (epoch + 1) % savefreq == 0:
            checkpointFilename = "checkpoint_{:05d}.h5".format(epoch + 1)
            checkpointFilename = os.path.join(outputpath, checkpointFilename)
            model.save(filepath=checkpointFilename, save_format="h5")
            utils.plotLoss(
                pLossValueLists=[trainLossList_epochs, valLossList_epochs],
                pNameList=["train", "validation"],
                pFilename=lossPlotFilename)
            #save the loss values so that they can be plotted again in different formats later on
            valLossFilename = "val_loss_{:05d}.npy".format(epoch + 1)
            trainLossFilename = "train_loss_{:05d}.npy".format(epoch + 1)
            valLossFilename = os.path.join(outputpath, valLossFilename)
            trainLossFilename = os.path.join(outputpath, trainLossFilename)
            np.save(valLossFilename, valLossList_epochs)
            np.save(trainLossFilename, trainLossList_epochs)
            del valLossFilename, trainLossFilename

    weights_after = model.layers[1].weights[0].numpy()
    print("weight sum before", np.sum(weights_before))
    print("weight sum after", np.sum(weights_after))

    #store the trained network
    model.save(filepath=modelfilepath, save_format="h5")

    #plot final train- and validation loss over epochs
    utils.plotLoss(pLossValueLists=[trainLossList_epochs, valLossList_epochs],
                   pNameList=["train", "validation"],
                   pFilename=lossPlotFilename)

    #delete train- and validation records, if debugstate not set
    if debugstate is None or debugstate == "Figures":
        for record in tqdm(traindataRecords + validationdataRecords,
                           desc="Deleting TFRecord files"):
            if os.path.exists(record):
                os.remove(record)
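trainStep and validationStep are defined elsewhere in this project. A heavily simplified sketch of what a tf.GradientTape-based trainStep could look like, covering only the weighted per-pixel loss term (the SSIM, total-variation, perception and score terms used above are omitted, and the function name is hypothetical):

import tensorflow as tf

@tf.function
def trainStepSketch(creationModel, factorInputBatch, targetInputBatch,
                    optimizer, lossFn, pixelLossWeight=1.0):
    # Forward pass, weighted per-pixel loss, gradient update.
    with tf.GradientTape() as tape:
        predMatrix = creationModel(factorInputBatch["factorData"], training=True)
        loss = pixelLossWeight * tf.reduce_mean(
            lossFn(targetInputBatch["out_matrixData"], predMatrix))
    grads = tape.gradient(loss, creationModel.trainable_variables)
    optimizer.apply_gradients(zip(grads, creationModel.trainable_variables))
    return loss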
Example 5

    def train(self):
        print('Start to run in mode [Supervised Learning in Source Domain]')
        self.sess.run(tf.global_variables_initializer())
        self.train_itr = len(self.training_data[0]) // self.bs

        self.best_val_accuracy = []
        self.best_val_loss = []

        for e in range(1, self.eps + 1):
            _tr_img, _tr_lab = DA_init.shuffle_data(self.training_data[0],
                                                    self.training_data[1])

            training_acc = 0.0
            training_loss = 0.0

            for itr in range(self.train_itr):
                _tr_img_batch, _tr_lab_batch = DA_init.next_batch(
                    _tr_img, _tr_lab, self.bs, itr)
                _train_accuracy, _train_loss, _ = self.sess.run(
                    [self.accuracy, self.loss, self.train_op],
                    feed_dict={
                        self.x: _tr_img_batch,
                        self.y: _tr_lab_batch,
                        self.is_training: True
                    })
                training_acc += _train_accuracy
                training_loss += _train_loss

            summary = self.sess.run(self.merged,
                                    feed_dict={
                                        self.x: _tr_img_batch,
                                        self.y: _tr_lab_batch,
                                        self.is_training: False
                                    })

            training_acc = float(training_acc / self.train_itr)
            training_loss = float(training_loss / self.train_itr)

            validation_acc, validation_loss = self.validation_procedure()
            self.best_val_accuracy.append(validation_acc)
            self.best_val_loss.append(validation_loss)

            log1 = "Epoch: [%d], Training Accuracy: [%g], Validation Accuracy: [%g], Loss Training: [%g] " \
                   "Loss_validation: [%g], Time: [%s]" % \
                   (e, training_acc, validation_acc, training_loss, validation_loss, time.ctime(time.time()))

            self.plt_epoch.append(e)
            self.plt_training_accuracy.append(training_acc)
            self.plt_training_loss.append(training_loss)
            self.plt_validation_accuracy.append(validation_acc)
            self.plt_validation_loss.append(validation_loss)

            utils.plotAccuracy(x=self.plt_epoch,
                               y1=self.plt_training_accuracy,
                               y2=self.plt_validation_accuracy,
                               figName=self.model,
                               line1Name='training',
                               line2Name='validation',
                               savePath=self.ckptDir)

            utils.plotLoss(x=self.plt_epoch,
                           y1=self.plt_training_loss,
                           y2=self.plt_validation_loss,
                           figName=self.model,
                           line1Name='training',
                           line2Name='validation',
                           savePath=self.ckptDir)

            utils.save2file(log1, self.ckptDir, self.model)

            self.writer.add_summary(summary, e)

            self.saver.save(self.sess,
                            self.ckptDir + self.model + '-' + str(e))

            self.test_procedure()

        self.best_val_index = self.best_val_accuracy.index(
            max(self.best_val_accuracy))
        log2 = 'Highest Validation Accuracy : [%g], Epoch : [%g]' % (
            self.best_val_accuracy[self.best_val_index],
            self.best_val_index + 1)
        utils.save2file(log2, self.ckptDir, self.model)

        self.best_val_index_loss = self.best_val_loss.index(
            min(self.best_val_loss))
        log3 = 'Lowest Validation Loss : [%g], Epoch : [%g]' % (
            self.best_val_loss[self.best_val_index_loss],
            self.best_val_index_loss + 1)
        utils.save2file(log3, self.ckptDir, self.model)
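DA_init.shuffle_data and DA_init.next_batch are assumed helper functions; a minimal sketch of what they might do, based on how they are called in the loop above:

import numpy as np

def shuffle_data(images, labels):
    # Shuffle images and labels with the same random permutation.
    perm = np.random.permutation(len(images))
    return images[perm], labels[perm]

def next_batch(images, labels, batch_size, itr):
    # Return the itr-th consecutive mini-batch of the (already shuffled) data.
    start = itr * batch_size
    return images[start:start + batch_size], labels[start:start + batch_size]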
Example 6

    """ Train Word2Vec """

    print('\nTraining W2V...')
    miniW2V = trainW2V(text, windowSize=8, negWords=20, embedDim=200, nOccur=10, phMinCount=10, phThresh=15, phDepth=4, 
                 wInit='xavier', raw=True, optimizer='Adagrad', epochs=100, lr=0.01, patience=5, epsilon=1e-7, 
                 tShuff=True, saveFreq=-1, outPath=basepath)


    print([x for x in miniW2V.trainDs.rwDict.index if '_' in x])

    miniW2V.train()
    miniW2V.saveModel(name='_best_{:.5f}'.format(miniW2V.earlStop.bestScore))

    print('Total number of words: {:d}'.format(len(flattenByAdd(miniW2V.trainDs.subsample))))
    print('Dictionary size: {:d}'.format(miniW2V.trainDs.wDict.shape[0]))

    embed = pd.DataFrame(miniW2V.getEmbedded()[:miniW2V.trainDs.rwDict.shape[0]], index=miniW2V.trainDs.rwDict.index)
    embed.to_hdf(os.path.join(basepath,'embedded.h5'),key='df')

    """ Plotting """

    plotLoss(miniW2V.losses, path=os.path.join(basepath,'batch_loss.png'))

    words = [miniW2V.trainDs.rwDict.index[0], miniW2V.trainDs.rwDict.index[2], miniW2V.trainDs.rwDict.index[3]]
    plotUmap(embed, words=words, path=basepath)
    
    for word in words:
        print(word,findClosest(embed, word))
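findClosest is not shown in this excerpt; a minimal sketch, assuming embed is a pandas DataFrame of word vectors indexed by word and cosine similarity is the metric:

import numpy as np

def findClosest(embed, word, n=5):
    # Rank all words by cosine similarity to the query word's vector
    # and return the n closest words (excluding the word itself).
    vec = embed.loc[word].values
    norms = np.linalg.norm(embed.values, axis=1) * np.linalg.norm(vec)
    sims = embed.values @ vec / np.clip(norms, 1e-12, None)
    order = np.argsort(-sims)
    return [embed.index[i] for i in order[1:n + 1]]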
Example 7
def trainingLoopGAN(obj,
                    training_generator,
                    generator,
                    discriminator,
                    model_name,
                    num_epoch,
                    optimizer_g,
                    optimizer_d,
                    loss_dict,
                    path,
                    device,
                    ae=None,
                    mu=0,
                    sigma=0.2,
                    discriminator_boost=5,
                    lambda_gp=10):
    """
    A universal training loop to optimize any loss function

    Args:

    obj       : Object on which the model is to be trained
    training_generator  : Training set generator
    generator           : Architecture of the generator
    discriminator       : Architecture of the discriminator
    model_name          : Name used when plotting the losses
    num_epoch           : number of training epochs
    optimizer_g         : optimizer for the generator loss
    optimizer_d         : optimizer for the discriminator loss
    loss_dict           : A dictionary to keep track of the losses
    path                : location where the models are stored
    device              : GPU or CPU device
    ae                  : Autoencoder model (optional)
    mu, sigma           : mean and standard deviation of the noise distribution
    discriminator_boost : number of critic (discriminator) updates per generator update
    lambda_gp           : regularization weight for the gradient penalty
    """

    # Check whether the checkpoint directory exists; if not, create it
    if not os.path.exists(path):
        os.makedirs(path)

    for epoch in range(num_epoch):  # loop over the dataset multiple times
        running_loss_g = 0.0
        running_loss_d = 0.0

        for data in training_generator:
            data = data.to(device)
            if ae is not None:
                data = ae(data)
            for _ in range(discriminator_boost):
                # zero the parameter gradients
                optimizer_d.zero_grad()

                # forward + backward + optimize
                noise = noiseFunc(mu, sigma, data.shape[0], device)
                outputs = generator(noise)
                loss_d_fake = discriminator(outputs.detach()).mean()
                loss_d_real = discriminator(data).mean()
                loss_d = loss_d_real + loss_d_fake

                # Gradient Penalty for Latent GAN
                if 'Latent' in generator.name:
                    grad_penal = compute_gradient_penalty(
                        discriminator, data, outputs, device)
                    loss_d = loss_d + lambda_gp * grad_penal
                loss_d.backward()
                optimizer_d.step()
                running_loss_d += loss_d.item()

            optimizer_g.zero_grad()
            noise = torch.randn((50, 128)).to(device)
            outputs = generator(noise)
            loss_g = discriminator(outputs).mean()
            loss_g.backward()
            optimizer_g.step()
            running_loss_g += loss_g.item()

        loss_dict[generator.name].append(running_loss_g)
        loss_dict[discriminator.name].append(running_loss_d)

        if epoch % 50 == 0:  # plot and checkpoint every 50 epochs
            plotLoss(loss_dict, model_name)
            plotPointCloud(obj, generator)
            saveModel(path + 'Gen ', generator, epoch)
            saveModel(path + 'Dis ', discriminator, epoch)
            print(epoch)
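compute_gradient_penalty is referenced above but not shown; a sketch of the standard WGAN-GP penalty, which pushes the norm of the critic's gradient on random interpolates between real and generated samples towards 1:

import torch

def compute_gradient_penalty(discriminator, real, fake, device):
    # Random interpolation coefficients, broadcast over all non-batch dims.
    alpha = torch.rand(real.size(0), *([1] * (real.dim() - 1)), device=device)
    interpolates = (alpha * real + (1 - alpha) * fake.detach()).requires_grad_(True)
    d_out = discriminator(interpolates)
    grads = torch.autograd.grad(outputs=d_out, inputs=interpolates,
                                grad_outputs=torch.ones_like(d_out),
                                create_graph=True, retain_graph=True)[0]
    grads = grads.reshape(grads.size(0), -1)
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()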
Example 8
#Error
error_P = np.linalg.norm(P_test-P_pred,2)/np.linalg.norm(P_test,2)
print("Test Error in P: "+str(error_P))
error_rho = np.linalg.norm(rho_test-rho_pred,2)/np.linalg.norm(rho_test,2)
print("Test Error in rho: "+str(error_rho))
error_u = np.linalg.norm(u_test-u_pred,2)/np.linalg.norm(u_test,2)
print("Test Error in u: "+str(error_u))
error_v = np.linalg.norm(v_test-v_pred,2)/np.linalg.norm(v_test,2)
print("Test Error in v: "+str(error_v))
error_Et = np.linalg.norm(Et_test- Et_pred,2)/np.linalg.norm(Et_test,2)
print("Test Error in E: "+str(error_Et))
error_T = np.linalg.norm(T_test- T_pred,2)/np.linalg.norm(T_test,2)
print("Test Error in T: "+str(error_T))


if args.plot == "1":
    utils.plotLoss(model_path + '%s_bp=%s_loss.csv' % (project_name, pb), 0, 1000, 0, 1)
    utils.plotAll(model_path,
                    x_test,
                    y_test,
                    P_test,
                    rho_test,
                    u_test,
                    v_test,
                    Et_test,
                    P_pred,
                    rho_pred,
                    u_pred,
                    v_pred,
                    Et_pred)
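Each error above is the relative L2 error ||x_test - x_pred||_2 / ||x_test||_2. A small helper could compute the same quantities without the repetition (this refactoring is a sketch; the test and predicted arrays are assumed to be defined earlier in the script):

import numpy as np

def relativeL2Error(true, pred):
    # Relative L2 error: ||true - pred||_2 / ||true||_2
    return np.linalg.norm(true - pred, 2) / np.linalg.norm(true, 2)

for name, true, pred in [("P", P_test, P_pred), ("rho", rho_test, rho_pred),
                         ("u", u_test, u_pred), ("v", v_test, v_pred),
                         ("E", Et_test, Et_pred)]:
    print("Test Error in " + name + ": " + str(relativeL2Error(true, pred)))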
Example 9

history1 = model.fit(x_train,
                     y_train,
                     batch_size=batch_size,
                     validation_data=(x_valid, y_valid),
                     epochs=num_epochs,
                     callbacks=my_callbacks1)

with open('vgg_history.pkl', 'wb') as f:
    pickle.dump(history1.history, f)

###
model.evaluate(x_test, y_test)

val_loss = history1.history['val_loss']
train_loss = history1.history['loss']
plotLoss(val_loss, train_loss)

val_accuracy = history1.history['val_accuracy']
train_accuracy = history1.history['accuracy']
plotAccuracy(val_accuracy, train_accuracy)
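plotLoss and plotAccuracy are called here with the validation curve first and the training curve second; a minimal matplotlib sketch with that signature (the helper bodies are assumptions, not the original implementation):

import matplotlib.pyplot as plt

def plotLoss(val_loss, train_loss):
    # Plot training and validation loss per epoch.
    plt.figure()
    plt.plot(train_loss, label='train')
    plt.plot(val_loss, label='validation')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()

def plotAccuracy(val_accuracy, train_accuracy):
    # Plot training and validation accuracy per epoch.
    plt.figure()
    plt.plot(train_accuracy, label='train')
    plt.plot(val_accuracy, label='validation')
    plt.xlabel('epoch')
    plt.ylabel('accuracy')
    plt.legend()
    plt.show()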

y_pred = model.predict(x_test)
y_test_label = [class_labels[i] for i in np.argmax(y_test, axis=1)]
y_pred_label = [class_labels[i] for i in np.argmax(y_pred, axis=1)]
plotConfusionMatrix(class_labels,
                    y_test_label,
                    y_pred_label,
                    title='Confusion matrix - vgg16')

#---
### train model with data augmentation
Example 10
print("Test Error in u: "+str(error_u))
error_v = np.linalg.norm(v_test-v_pred,2)/np.linalg.norm(v_test,2)
print("Test Error in v: "+str(error_v))
error_Et = np.linalg.norm(Et_test- Et_pred,2)/np.linalg.norm(Et_test,2)
print("Test Error in E: "+str(error_Et))


print("%.3f\t%.3f\t%.3f\t%.3f\t%.3f"%(error_P,error_rho,error_u,error_v,error_Et))
print("%.3f"%((error_P+error_rho+error_u+error_v+error_Et)/5))



if args.plot == "1":
    utils.plotLoss( save,
                    model_path + '%s_bp=%s'%(project_name,pb), 
                    0,
                    600,
                    0,
                    10)

elif args.plot == "2":
    utils.plotAll_1D(   save,
                        x_test,
                        y_test,
                        P_test,
                        rho_test,
                        u_test,
                        v_test,
                        Et_test,
                        P_pred,
                        rho_pred,
                        u_pred,